Diffstat (limited to 'roles')
244 files changed, 14787 insertions, 2120 deletions
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index ba2f7293b..f869b5fae 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -72,6 +72,15 @@ - apiGroups: ["image.openshift.io", ""] resources: ["images"] verbs: ["get", "list"] + - apiGroups: ["network.openshift.io"] + resources: ["clusternetworks", "netnamespaces"] + verbs: ["get"] + - apiGroups: ["network.openshift.io"] + resources: ["netnamespaces"] + verbs: ["update"] + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies"] + verbs: ["create", "delete"] - name: Create asb-access cluster role oc_clusterrole: @@ -366,6 +375,11 @@ secret: secretName: etcd-auth-secret +- name: set auth name and type facts if needed + set_fact: + ansible_service_broker_registry_auth_type: "secret" + ansible_service_broker_registry_auth_name: "asb-registry-auth" + when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != "" # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: - name: Create config map for ansible-service-broker @@ -393,6 +407,8 @@ org: {{ ansible_service_broker_registry_organization }} tag: {{ ansible_service_broker_registry_tag }} white_list: {{ ansible_service_broker_registry_whitelist | to_yaml }} + auth_type: "{{ ansible_service_broker_registry_auth_type | default("") }}" + auth_name: "{{ ansible_service_broker_registry_auth_name | default("") }}" - type: local_openshift name: localregistry namespaces: ['openshift'] @@ -438,6 +454,7 @@ data: "{{ ansible_service_broker_registry_user }}" - path: password data: "{{ ansible_service_broker_registry_password }}" + when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != "" - name: Create the Broker resource in the catalog oc_obj: diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml index 05415a4d6..834ebba64 100644 --- a/roles/calico_master/tasks/main.yml +++ b/roles/calico_master/tasks/main.yml @@ -23,7 +23,7 @@ -f {{ mktemp.stdout }}/calico-policy-controller.yml --config={{ openshift.common.config_base }}/master/admin.kubeconfig register: calico_create_output - failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) + failed_when: "('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) and calico_create_output.rc != 0" changed_when: ('created' in calico_create_output.stdout) - name: Calico Master | Delete temp directory diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml index d0e37e2f4..7397e2bec 100644 --- a/roles/container_runtime/defaults/main.yml +++ b/roles/container_runtime/defaults/main.yml @@ -64,7 +64,7 @@ docker_storage_setup_options: root_lv_mount_path: "{{ docker_storage_path }}" docker_storage_extra_options: - "--storage-opt overlay2.override_kernel_check=true" -- "--storage-opt overlay2.size={{ docker_storage_size }}" +- "{{ '--storage-opt overlay2.size=' ~ docker_storage_size if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' else '' }}" - "--graph={{ docker_storage_path}}" @@ -101,45 +101,34 @@ l_crt_crio_image_tag_dict: openshift-enterprise: "{{ l_openshift_image_tag }}" origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}" 
-l_crt_crio_image_prepend_dict: - openshift-enterprise: "registry.access.redhat.com/openshift3" - origin: "docker.io/gscrivano" - l_crt_crio_image_dict: - Fedora: - crio_image_name: "cri-o-fedora" - crio_image_tag: "latest" - CentOS: - crio_image_name: "cri-o-centos" - crio_image_tag: "latest" - RedHat: - crio_image_name: "cri-o" - crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}" - -l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}" -l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}" -l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution] }}" - -l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}" + Fedora: "registry.fedoraproject.org/latest/cri-o" + CentOS: "registry.centos.org/projectatomic/cri-o" + RedHat: "registry.access.redhat.com/openshift3/cri-o" + +l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution] }}" +l_crio_image_tag: "{{ l_crt_crio_image_tag_dict[openshift_deployment_type] }}" + +l_crio_image_default: "{{ l_crio_image_name }}:{{ l_crio_image_tag }}" l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}" # ----------------------- # # systemcontainers_docker # # ----------------------- # -l_crt_docker_image_prepend_dict: - Fedora: "registry.fedoraproject.org/latest" - Centos: "docker.io/gscrivano" - RedHat: "registry.access.redhat.com/openshift3" +l_crt_docker_image_dict: + Fedora: "registry.fedoraproject.org/latest/docker" + CentOS: "registry.centos.org/projectatomic/docker" + RedHat: "registry.access.redhat.com/openshift3/container-engine" openshift_docker_image_tag_default: "latest" l_crt_docker_image_tag_dict: openshift-enterprise: "{{ l_openshift_image_tag }}" origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}" -l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}" +l_docker_image_prepend: "{{ l_crt_docker_image_dict[ansible_distribution] }}" l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}" -l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}" +l_docker_image_default: "{{ l_docker_image_prepend }}:{{ l_docker_image_tag }}" l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}" l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml index d6e7e7fed..ed9a2709b 100644 --- a/roles/container_runtime/tasks/package_docker.yml +++ b/roles/container_runtime/tasks/package_docker.yml @@ -1,6 +1,17 @@ --- - include_tasks: common/pre.yml +# In some cases, some services may be run as containers and docker may still +# be installed via rpm. 
+- include_tasks: common/atomic_proxy.yml + when: + - > + (openshift_use_system_containers | default(False)) | bool + or (openshift_use_etcd_system_container | default(False)) | bool + or (openshift_use_openvswitch_system_container | default(False)) | bool + or (openshift_use_node_system_container | default(False)) | bool + or (openshift_use_master_system_container | default(False)) | bool + - name: Get current installed Docker version command: "{{ repoquery_installed }} --qf '%{version}' docker" when: not openshift_is_atomic | bool diff --git a/roles/container_runtime/templates/docker_storage_setup.j2 b/roles/container_runtime/templates/docker_storage_setup.j2 index b056087e0..ec540ea44 100644 --- a/roles/container_runtime/templates/docker_storage_setup.j2 +++ b/roles/container_runtime/templates/docker_storage_setup.j2 @@ -2,6 +2,7 @@ # /usr/lib/docker-storage-setup/docker-storage-setup. # # For more details refer to "man docker-storage-setup" +{% if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' %} DEVS={{ container_runtime_docker_storage_setup_device }} VG={{ docker_storage_setup_options.vg }} DATA_SIZE={{ docker_storage_setup_options.data_size }} @@ -9,4 +10,7 @@ STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}" CONTAINER_ROOT_LV_NAME="{{ docker_storage_setup_options.root_lv_name }}" CONTAINER_ROOT_LV_SIZE="{{ docker_storage_setup_options.root_lv_size }}" CONTAINER_ROOT_LV_MOUNT_PATH="{{ docker_storage_setup_options.root_lv_mount_path }}" +{% else %} +STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}" +{% endif %} EXTRA_STORAGE_OPTIONS="{{ docker_storage_extra_options | join(' ') }}" diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml index 881a8c270..cab835e20 100644 --- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml +++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml @@ -1,7 +1,7 @@ --- - name: Install etcd for etcdctl package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present - when: not openshift_is_atomic | bool + when: not openshift_is_containerized | bool register: result until: result is succeeded diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml index 7634b8192..38d2f748b 100644 --- a/roles/flannel/meta/main.yml +++ b/roles/flannel/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info: - system dependencies: - role: lib_utils +- role: openshift_facts diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index da7e7b1da..a38b95c1d 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -127,6 +127,10 @@ class CallbackModule(CallbackBase): self._display.display( '\tThis phase can be restarted by running: {}'.format( phase_attributes[phase]['playbook'])) + if 'message' in stats.custom['_run'][phase]: + self._display.display( + '\t{}'.format( + stats.custom['_run'][phase]['message'])) self._display.display("", screen_only=True) diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml index 08f2d5adc..41d0ead20 100644 --- a/roles/kuryr/tasks/node.yaml +++ b/roles/kuryr/tasks/node.yaml @@ -40,7 +40,7 @@ regexp: '^OPTIONS="?(.*?)"?$' backrefs: yes backup: yes - line: 'OPTIONS="\1 --disable dns,proxy,plugins"' + line: 'OPTIONS="\1 --disable proxy"' - name: force node restart to disable 
the proxy service: diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2 index 39348ae90..09f4c7dfe 100644 --- a/roles/kuryr/templates/cni-daemonset.yaml.j2 +++ b/roles/kuryr/templates/cni-daemonset.yaml.j2 @@ -26,6 +26,13 @@ spec: image: kuryr/cni:latest imagePullPolicy: IfNotPresent command: [ "cni_ds_init" ] + env: + - name: CNI_DAEMON + value: "True" + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName securityContext: privileged: true volumeMounts: @@ -38,6 +45,10 @@ spec: subPath: kuryr-cni.conf - name: etc mountPath: /etc + - name: proc + mountPath: /host_proc + - name: openvswitch + mountPath: /var/run/openvswitch volumes: - name: bin hostPath: @@ -50,4 +61,10 @@ spec: name: kuryr-config - name: etc hostPath: - path: /etc
\ No newline at end of file + path: /etc + - name: proc + hostPath: + path: /proc + - name: openvswitch + hostPath: + path: /var/run/openvswitch diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2 index 96c215f00..4bf1dbddf 100644 --- a/roles/kuryr/templates/configmap.yaml.j2 +++ b/roles/kuryr/templates/configmap.yaml.j2 @@ -16,17 +16,17 @@ data: # Directory for Kuryr vif binding executables. (string value) #bindir = /usr/libexec/kuryr + # Neutron subnetpool name will be prefixed by this. (string value) + #subnetpool_name_prefix = kuryrPool + + # baremetal or nested-containers are the supported values. (string value) + #deployment_type = baremetal + # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) # Note: This option can be changed without restarting. #debug = false - # DEPRECATED: If set to false, the logging level will be set to WARNING instead - # of the default INFO level. (boolean value) - # This option is deprecated for removal. - # Its value may be silently ignored in the future. - #verbose = true - # The name of a logging configuration file. This file is appended to any # existing logging configuration files. For details about logging configuration # files, see the Python logging module documentation. Note that when logging @@ -46,7 +46,7 @@ data: # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile - #log_file = /var/log/kuryr/kuryr-controller.log + #log_file = <None> # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) @@ -65,13 +65,19 @@ data: # is set. (boolean value) #use_syslog = false + # Enable journald for logging. If running in a systemd environment you may wish + # to enable journal support. Doing so will use the journal native protocol + # which includes structured metadata in addition to log messages.This option is + # ignored if log_config_append is set. (boolean value) + #use_journal = false + # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. This option is ignored if log_config_append is # set. (boolean value) - #use_stderr = true + #use_stderr = false # Format string to use for log messages with context. (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s @@ -93,7 +99,7 @@ data: # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. 
(list value) - #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false @@ -106,15 +112,86 @@ data: # value) #instance_uuid_format = "[instance: %(uuid)s] " + # Interval, number of seconds, of log rate limiting. (integer value) + #rate_limit_interval = 0 + + # Maximum number of logged messages per rate_limit_interval. (integer value) + #rate_limit_burst = 0 + + # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG + # or empty string. Logs with level greater or equal to rate_limit_except_level + # are not filtered. An empty string means that all levels are filtered. (string + # value) + #rate_limit_except_level = CRITICAL + # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false [binding] + # Configuration options for container interface binding. - driver = kuryr.lib.binding.drivers.vlan + # + # From kuryr_kubernetes + # + + # The name prefix of the veth endpoint put inside the container. (string value) + #veth_dst_prefix = eth + + # Driver to use for binding and unbinding ports. (string value) + # Deprecated group/name - [binding]/driver + #default_driver = kuryr.lib.binding.drivers.veth + + # Drivers to use for binding and unbinding ports. (list value) + #enabled_drivers = kuryr.lib.binding.drivers.veth + + # Specifies the name of the Nova instance interface to link the virtual devices + # to (only applicable to some binding drivers. (string value) link_iface = eth0 + driver = kuryr.lib.binding.drivers.vlan + + + [cni_daemon] + + # + # From kuryr_kubernetes + # + + # Enable CNI Daemon configuration. (boolean value) + daemon_enabled = true + + # Bind address for CNI daemon HTTP server. It is recommened to allow only local + # connections. (string value) + bind_address = 127.0.0.1:50036 + + # Maximum number of processes that will be spawned to process requests from CNI + # driver. (integer value) + #worker_num = 30 + + # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in + # pod metadata before failing the CNI request. (integer value) + #vif_annotation_timeout = 120 + + # Kuryr uses pyroute2 library to manipulate networking interfaces. When + # processing a high number of Kuryr requests in parallel, it may take kernel + # more time to process all networking stack changes. This option allows to tune + # internal pyroute2 timeout. (integer value) + #pyroute2_timeout = 30 + + # Set to True when you are running kuryr-daemon inside a Docker container on + # Kubernetes host. E.g. as DaemonSet on Kubernetes cluster Kuryr is supposed to + # provide networking for. 
This mainly means thatkuryr-daemon will look for + # network namespaces in $netns_proc_dir instead of /proc. (boolean value) + docker_mode = true + + # When docker_mode is set to True, this config option should be set to where + # host's /proc directory is mounted. Please note that mounting it is necessary + # to allow Kuryr-Kubernetes to move host interfaces between host network + # namespaces, which is essential for Kuryr to work. (string value) + netns_proc_dir = /host_proc + + [kubernetes] # @@ -164,11 +241,6 @@ data: # The driver that manages VIFs pools for Kubernetes Pods (string value) vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }} - [vif_pool] - ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }} - ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }} - ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }} - ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }} [neutron] # Configuration options for OpenStack Neutron @@ -232,13 +304,55 @@ data: external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }} [pod_vif_nested] + worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} + + + [pool_manager] + + # + # From kuryr_kubernetes + # + + # Absolute path to socket file that will be used for communication with the + # Pool Manager daemon (string value) + #sock_file = /run/kuryr/kuryr_manage.sock + + + [vif_pool] + + # + # From kuryr_kubernetes + # + + # Set a maximun amount of ports per pool. 0 to disable (integer value) + ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }} + + # Set a target minimum size of the pool of ports (integer value) + ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }} + + # Number of ports to be created in a bulk request (integer value) + ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }} + + # Minimun interval (in seconds) between pool updates (integer value) + ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }} + kuryr-cni.conf: |+ [DEFAULT] # # From kuryr_kubernetes # + + # Directory for Kuryr vif binding executables. (string value) + #bindir = /usr/libexec/kuryr + + # Neutron subnetpool name will be prefixed by this. (string value) + #subnetpool_name_prefix = kuryrPool + + # baremetal or nested-containers are the supported values. (string value) + #deployment_type = baremetal + # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) # Note: This option can be changed without restarting. @@ -263,7 +377,7 @@ data: # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile - #log_file = /var/log/kuryr/cni.log + #log_file = <None> # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) @@ -282,6 +396,12 @@ data: # is set. (boolean value) #use_syslog = false + # Enable journald for logging. If running in a systemd environment you may wish + # to enable journal support. Doing so will use the journal native protocol + # which includes structured metadata in addition to log messages.This option is + # ignored if log_config_append is set. (boolean value) + #use_journal = false + # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. 
(string value) #syslog_log_facility = LOG_USER @@ -310,7 +430,7 @@ data: # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. (list value) - #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false @@ -323,14 +443,85 @@ data: # value) #instance_uuid_format = "[instance: %(uuid)s] " + # Interval, number of seconds, of log rate limiting. (integer value) + #rate_limit_interval = 0 + + # Maximum number of logged messages per rate_limit_interval. (integer value) + #rate_limit_burst = 0 + + # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG + # or empty string. Logs with level greater or equal to rate_limit_except_level + # are not filtered. An empty string means that all levels are filtered. (string + # value) + #rate_limit_except_level = CRITICAL + # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false [binding] + # Configuration options for container interface binding. + + # + # From kuryr_kubernetes + # + + # The name prefix of the veth endpoint put inside the container. (string value) + #veth_dst_prefix = eth + + # Driver to use for binding and unbinding ports. (string value) + # Deprecated group/name - [binding]/driver + #default_driver = kuryr.lib.binding.drivers.veth + + # Drivers to use for binding and unbinding ports. (list value) + #enabled_drivers = kuryr.lib.binding.drivers.veth + + # Specifies the name of the Nova instance interface to link the virtual devices + # to (only applicable to some binding drivers. (string value) + link_iface = eth0 driver = kuryr.lib.binding.drivers.vlan - link_iface = {{ kuryr_cni_link_interface }} + + + [cni_daemon] + + # + # From kuryr_kubernetes + # + + # Enable CNI Daemon configuration. (boolean value) + daemon_enabled = true + + # Bind address for CNI daemon HTTP server. It is recommened to allow only local + # connections. (string value) + bind_address = 127.0.0.1:50036 + + # Maximum number of processes that will be spawned to process requests from CNI + # driver. (integer value) + #worker_num = 30 + + # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in + # pod metadata before failing the CNI request. (integer value) + #vif_annotation_timeout = 120 + + # Kuryr uses pyroute2 library to manipulate networking interfaces. When + # processing a high number of Kuryr requests in parallel, it may take kernel + # more time to process all networking stack changes. This option allows to tune + # internal pyroute2 timeout. 
(integer value) + #pyroute2_timeout = 30 + + # Set to True when you are running kuryr-daemon inside a Docker container on + # Kubernetes host. E.g. as DaemonSet on Kubernetes cluster Kuryr is supposed to + # provide networking for. This mainly means thatkuryr-daemon will look for + # network namespaces in $netns_proc_dir instead of /proc. (boolean value) + docker_mode = true + + # When docker_mode is set to True, this config option should be set to where + # host's /proc directory is mounted. Please note that mounting it is necessary + # to allow Kuryr-Kubernetes to move host interfaces between host network + # namespaces, which is essential for Kuryr to work. (string value) + netns_proc_dir = /host_proc + [kubernetes] @@ -341,12 +532,136 @@ data: # The root URL of the Kubernetes API (string value) api_root = {{ openshift.master.api_url }} - # The token to talk to the k8s API - token_file = /etc/kuryr/token + # Absolute path to client cert to connect to HTTPS K8S_API (string value) + # ssl_client_crt_file = /etc/kuryr/controller.crt + + # Absolute path client key file to connect to HTTPS K8S_API (string value) + # ssl_client_key_file = /etc/kuryr/controller.key # Absolute path to ca cert file to connect to HTTPS K8S_API (string value) - ssl_ca_crt_file = /etc/kuryr/ca.crt + ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + + # The token to talk to the k8s API + token_file = /var/run/secrets/kubernetes.io/serviceaccount/token # HTTPS K8S_API server identity verification (boolean value) # TODO (apuimedo): Make configurable ssl_verify_server_crt = True + + # The driver to determine OpenStack project for pod ports (string value) + pod_project_driver = default + + # The driver to determine OpenStack project for services (string value) + service_project_driver = default + + # The driver to determine Neutron subnets for pod ports (string value) + pod_subnets_driver = default + + # The driver to determine Neutron subnets for services (string value) + service_subnets_driver = default + + # The driver to determine Neutron security groups for pods (string value) + pod_security_groups_driver = default + + # The driver to determine Neutron security groups for services (string value) + service_security_groups_driver = default + + # The driver that provides VIFs for Kubernetes Pods. 
(string value) + pod_vif_driver = nested-vlan + + # The driver that manages VIFs pools for Kubernetes Pods (string value) + vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }} + + [neutron] + # Configuration options for OpenStack Neutron + + # + # From kuryr_kubernetes + # + + # Authentication URL (string value) + auth_url = {{ kuryr_openstack_auth_url }} + + # Authentication type to load (string value) + # Deprecated group/name - [neutron]/auth_plugin + auth_type = password + + # Domain ID to scope to (string value) + user_domain_name = {{ kuryr_openstack_user_domain_name }} + + # User's password (string value) + password = {{ kuryr_openstack_password }} + + # Domain name containing project (string value) + project_domain_name = {{ kuryr_openstack_project_domain_name }} + + # Project ID to scope to (string value) + # Deprecated group/name - [neutron]/tenant-id + project_id = {{ kuryr_openstack_project_id }} + + # Token (string value) + #token = <None> + + # Trust ID (string value) + #trust_id = <None> + + # User's domain id (string value) + #user_domain_id = <None> + + # User id (string value) + #user_id = <None> + + # Username (string value) + # Deprecated group/name - [neutron]/user-name + username = {{kuryr_openstack_username }} + + # Whether a plugging operation is failed if the port to plug does not become + # active (boolean value) + #vif_plugging_is_fatal = false + + # Seconds to wait for port to become active (integer value) + #vif_plugging_timeout = 0 + + [neutron_defaults] + + pod_security_groups = {{ kuryr_openstack_pod_sg_id }} + pod_subnet = {{ kuryr_openstack_pod_subnet_id }} + service_subnet = {{ kuryr_openstack_service_subnet_id }} + project = {{ kuryr_openstack_pod_project_id }} + # TODO (apuimedo): Remove the duplicated line just after this one once the + # RDO packaging contains the upstream patch + worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} + + [pod_vif_nested] + + worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} + + + [pool_manager] + + # + # From kuryr_kubernetes + # + + # Absolute path to socket file that will be used for communication with the + # Pool Manager daemon (string value) + #sock_file = /run/kuryr/kuryr_manage.sock + + + [vif_pool] + + # + # From kuryr_kubernetes + # + + # Set a maximun amount of ports per pool. 
0 to disable (integer value) + ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }} + + # Set a target minimum size of the pool of ports (integer value) + ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }} + + # Number of ports to be created in a bulk request (integer value) + ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }} + + # Minimun interval (in seconds) between pool updates (integer value) + ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }} diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index 1b63a6c13..72023eaf7 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -1485,7 +1485,7 @@ class OCGroup(OpenShiftCLI): def needs_update(self): ''' verify an update is needed ''' - return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True) + return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True) # pylint: disable=too-many-return-statements,too-many-branches @staticmethod diff --git a/roles/lib_openshift/src/class/oc_group.py b/roles/lib_openshift/src/class/oc_group.py index 89fb09ea4..53e6b6766 100644 --- a/roles/lib_openshift/src/class/oc_group.py +++ b/roles/lib_openshift/src/class/oc_group.py @@ -59,7 +59,7 @@ class OCGroup(OpenShiftCLI): def needs_update(self): ''' verify an update is needed ''' - return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True) + return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True) # pylint: disable=too-many-return-statements,too-many-branches @staticmethod diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py index fc14b5633..574743ff1 100644 --- a/roles/lib_utils/filter_plugins/oo_filters.py +++ b/roles/lib_utils/filter_plugins/oo_filters.py @@ -4,6 +4,7 @@ """ Custom filters for use in openshift-ansible """ +import json import os import pdb import random @@ -21,13 +22,10 @@ import yaml from ansible import errors from ansible.parsing.yaml.dumper import AnsibleDumper -# ansible.compat.six goes away with Ansible 2.4 -try: - from ansible.compat.six import string_types, u - from ansible.compat.six.moves.urllib.parse import urlparse -except ImportError: - from ansible.module_utils.six import string_types, u - from ansible.module_utils.six.moves.urllib.parse import urlparse +# pylint: disable=import-error,no-name-in-module +from ansible.module_utils.six import string_types, u +# pylint: disable=import-error,no-name-in-module +from ansible.module_utils.six.moves.urllib.parse import urlparse HAS_OPENSSL = False try: @@ -274,7 +272,7 @@ def haproxy_backend_masters(hosts, port): return servers -# pylint: disable=too-many-branches +# pylint: disable=too-many-branches, too-many-nested-blocks def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames): """ Parses names from list of certificate hashes. 
@@ -320,8 +318,9 @@ def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, interna certificate['names'].append(str(cert.get_subject().commonName.decode())) for i in range(cert.get_extension_count()): if cert.get_extension(i).get_short_name() == 'subjectAltName': - for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '): - certificate['names'].append(name) + for name in str(cert.get_extension(i)).split(', '): + if 'DNS:' in name: + certificate['names'].append(name.replace('DNS:', '')) except Exception: raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] + "please specify certificate names in host inventory")) @@ -343,6 +342,58 @@ def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, interna return certificates +def lib_utils_oo_parse_certificate_san(certificate): + """ Parses SubjectAlternativeNames from a PEM certificate. + + Ex: certificate = '''-----BEGIN CERTIFICATE----- + MIIEcjCCAlqgAwIBAgIBAzANBgkqhkiG9w0BAQsFADAhMR8wHQYDVQQDDBZldGNk + LXNpZ25lckAxNTE2ODIwNTg1MB4XDTE4MDEyNDE5MDMzM1oXDTIzMDEyMzE5MDMz + M1owHzEdMBsGA1UEAwwUbWFzdGVyMS5hYnV0Y2hlci5jb20wggEiMA0GCSqGSIb3 + DQEBAQUAA4IBDwAwggEKAoIBAQD4wBdWXNI3TF1M0b0bEIGyJPvdqKeGwF5XlxWg + NoA1Ain/Xz0N1SW5pXW2CDo9HX+ay8DyhzR532yrBa+RO3ivNCmfnexTQinfSLWG + mBEdiu7HO3puR/GNm74JNyXoEKlMAIRiTGq9HPoTo7tNV5MLodgYirpHrkSutOww + DfFSrNjH/ehqxwQtrIOnTAHigdTOrKVdoYxqXblDEMONTPLI5LMvm4/BqnAVaOyb + 9RUzND6lxU/ei3FbUS5IoeASOHx0l1ifxae3OeSNAimm/RIRo9rieFNUFh45TzID + elsdGrLB75LH/gnRVV1xxVbwPN6xW1mEwOceRMuhIArJQ2G5AgMBAAGjgbYwgbMw + UQYDVR0jBEowSIAUXTqN88vCI6E7wONls3QJ4/63unOhJaQjMCExHzAdBgNVBAMM + FmV0Y2Qtc2lnbmVyQDE1MTY4MjA1ODWCCQDMaopfom6OljAMBgNVHRMBAf8EAjAA + MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDAdBgNVHQ4EFgQU7l05 + OYeY3HppL6/0VJSirudj8t0wDwYDVR0RBAgwBocEwKh6ujANBgkqhkiG9w0BAQsF + AAOCAgEAFU8sicE5EeQsUPnFEqDvoJd1cVE+8aCBqkW0++4GsVw2A/JOJ3OBJL6r + BV3b1u8/e8xBNi8hPi42Q+LWBITZZ/COFyhwEAK94hcr7eZLCV2xfUdMJziP4Qkh + /WRN7vXHTtJ6NP/d6A22SPbtnMSt9Y6G8y9qa5HBrqIqmkYbLzDw/SdZbDbuGhRk + xUwg2ahXNblVoE5P6rxPONgXliA94telZ1/61iyrVaiGQb1/GUP/DRfvvR4dOCrA + lMosW6fm37Wdi/8iYW+aDPWGS+yVK/sjSnHNjxqvrzkfGk+COa5riT9hJ7wZY0Hb + YiJS74SZgZt/nnr5PI2zFRUiZLECqCkZnC/sz29i+irLabnq7Cif9Mv+TUcXWvry + TdJuaaYdTSMRSUkDd/c9Ife8tOr1i1xhFzDNKNkZjTVRk1MBquSXndVCDKucdfGi + YoWm+NDFrayw8yxK/KTHo3Db3lu1eIXTHxriodFx898b//hysHr4hs4/tsEFUTZi + 705L2ScIFLfnyaPby5GK/3sBIXtuhOFM3QV3JoYKlJB5T6wJioVoUmSLc+UxZMeE + t9gGVQbVxtLvNHUdW7uKQ5pd76nIJqApQf8wg2Pja8oo56fRZX2XLt8nm9cswcC4 + Y1mDMvtfxglQATwMTuoKGdREuu1mbdb8QqdyQmZuMa72q+ax2kQ= + -----END CERTIFICATE-----''' + + returns ['192.168.122.186'] + """ + + if not HAS_OPENSSL: + raise errors.AnsibleFilterError("|missing OpenSSL python bindings") + + names = [] + + try: + lcert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate) + for i in range(lcert.get_extension_count()): + if lcert.get_extension(i).get_short_name() == 'subjectAltName': + sanstr = str(lcert.get_extension(i)) + sanstr = sanstr.replace('DNS:', '') + sanstr = sanstr.replace('IP Address:', '') + names = sanstr.split(', ') + except Exception: + raise errors.AnsibleFilterError("|failed to parse certificate") + + return names + + def lib_utils_oo_generate_secret(num_bytes): """ generate a session secret """ @@ -589,6 +640,18 @@ that result to this filter plugin. return secret_name +def lib_utils_oo_l_of_d_to_csv(input_list): + """Map a list of dictionaries, input_list, into a csv string + of json values. 
+ + Example input: + [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}] + Example output: + u'{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}' + """ + return ','.join(json.dumps(x) for x in input_list) + + def map_from_pairs(source, delim="="): ''' Returns a dict given the source and delim delimited ''' if source == '': @@ -615,6 +678,7 @@ class FilterModule(object): "lib_utils_oo_dict_to_keqv_list": lib_utils_oo_dict_to_keqv_list, "lib_utils_oo_list_to_dict": lib_utils_oo_list_to_dict, "lib_utils_oo_parse_named_certificates": lib_utils_oo_parse_named_certificates, + "lib_utils_oo_parse_certificate_san": lib_utils_oo_parse_certificate_san, "lib_utils_oo_generate_secret": lib_utils_oo_generate_secret, "lib_utils_oo_pods_match_component": lib_utils_oo_pods_match_component, "lib_utils_oo_image_tag_to_rpm_version": lib_utils_oo_image_tag_to_rpm_version, @@ -626,5 +690,6 @@ class FilterModule(object): "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule, "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list, "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets, + "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv, "map_from_pairs": map_from_pairs } diff --git a/roles/lib_utils/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py index dfcb11da3..f16048056 100644 --- a/roles/lib_utils/filter_plugins/openshift_aws_filters.py +++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py @@ -67,8 +67,24 @@ class FilterModule(object): return tags + @staticmethod + def get_default_az(subnets): + ''' From a list of subnets/AZs in a specific region (from the VPC + structure), return the AZ that has the key/value + 'default_az=True.' ''' + + for subnet in subnets: + if subnet.get('default_az'): + return subnet['az'] + + # if there was none marked with default_az=True, just return the first + # one. 
(this does mean we could possible return an item that has + # default_az=False set + return subnets[0]['az'] + def filters(self): ''' returns a mapping of filters to methods ''' return {'build_instance_tags': self.build_instance_tags, + 'get_default_az': self.get_default_az, 'scale_groups_match_capacity': self.scale_groups_match_capacity, 'scale_groups_serial': self.scale_groups_serial} diff --git a/roles/lib_utils/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py index ff15f693b..e67b19c28 100644 --- a/roles/lib_utils/filter_plugins/openshift_master.py +++ b/roles/lib_utils/filter_plugins/openshift_master.py @@ -10,11 +10,7 @@ from ansible import errors from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.filter.core import to_bool as ansible_bool -# ansible.compat.six goes away with Ansible 2.4 -try: - from ansible.compat.six import string_types, u -except ImportError: - from ansible.module_utils.six import string_types, u +from ansible.module_utils.six import string_types, u import yaml diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py index d4674845e..936fb1c38 100644 --- a/roles/lib_utils/library/docker_creds.py +++ b/roles/lib_utils/library/docker_creds.py @@ -135,7 +135,7 @@ def update_config(docker_config, registry, username, password): docker_config['auths'][registry] = {} # base64 encode our username:password string - encoded_data = base64.b64encode('{}:{}'.format(username, password)) + encoded_data = base64.b64encode('{}:{}'.format(username, password).encode()) # check if the same value is already present for idempotency. if 'auth' in docker_config['auths'][registry]: @@ -148,6 +148,8 @@ def update_config(docker_config, registry, username, password): def write_config(module, docker_config, dest): '''Write updated credentials into dest/config.json''' + if not isinstance(docker_config, dict): + docker_config = docker_config.decode() conf_file_path = os.path.join(dest, 'config.json') try: with open(conf_file_path, 'w') as conf_file: diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py index 440b8ec28..efdfcf1c7 100644 --- a/roles/lib_utils/library/openshift_container_binary_sync.py +++ b/roles/lib_utils/library/openshift_container_binary_sync.py @@ -107,7 +107,7 @@ class BinarySyncer(object): self._sync_binary('oc') # Ensure correct symlinks created: - self._sync_symlink('kubectl', 'openshift') + self._sync_symlink('kubectl', 'oc') # Remove old oadm binary if os.path.exists(os.path.join(self.bin_dir, 'oadm')): diff --git a/roles/lib_utils/library/swapoff.py b/roles/lib_utils/library/swapoff.py new file mode 100644 index 000000000..925eeb17d --- /dev/null +++ b/roles/lib_utils/library/swapoff.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python +# pylint: disable=missing-docstring +# +# Copyright 2017 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess + +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: swapoff + +short_description: Disable swap and comment from /etc/fstab + +version_added: "2.4" + +description: + - This module disables swap and comments entries from /etc/fstab + +author: + - "Michael Gugino <mgugino@redhat.com>" +''' + +EXAMPLES = ''' +# Pass in a message +- name: Disable Swap + swapoff: {} +''' + + +def check_swap_in_fstab(module): + '''Check for uncommented swap entries in fstab''' + res = subprocess.call(['grep', '^[^#].*swap', '/etc/fstab']) + + if res == 2: + # rc 2 == cannot open file. + result = {'failed': True, + 'changed': False, + 'msg': 'unable to read /etc/fstab', + 'state': 'unknown'} + module.fail_json(**result) + elif res == 1: + # No grep match, fstab looks good. + return False + elif res == 0: + # There is an uncommented entry for fstab. + return True + else: + # Some other grep error code, we shouldn't get here. + result = {'failed': True, + 'changed': False, + 'msg': 'unknow problem with grep "^[^#].*swap" /etc/fstab ', + 'state': 'unknown'} + module.fail_json(**result) + + +def check_swapon_status(module): + '''Check if swap is actually in use.''' + try: + res = subprocess.check_output(['swapon', '--show']) + except subprocess.CalledProcessError: + # Some other grep error code, we shouldn't get here. + result = {'failed': True, + 'changed': False, + 'msg': 'unable to execute swapon --show', + 'state': 'unknown'} + module.fail_json(**result) + return 'NAME' in str(res) + + +def comment_swap_fstab(module): + '''Comment out swap lines in /etc/fstab''' + res = subprocess.call(['sed', '-i.bak', 's/^[^#].*swap.*/#&/', '/etc/fstab']) + if res: + result = {'failed': True, + 'changed': False, + 'msg': 'sed failed to comment swap in /etc/fstab', + 'state': 'unknown'} + module.fail_json(**result) + + +def run_swapoff(module, changed): + '''Run swapoff command''' + res = subprocess.call(['swapoff', '--all']) + if res: + result = {'failed': True, + 'changed': changed, + 'msg': 'swapoff --all returned {}'.format(str(res)), + 'state': 'unknown'} + module.fail_json(**result) + + +def run_module(): + '''Run this module''' + module = AnsibleModule( + supports_check_mode=False, + argument_spec={} + ) + changed = False + + swap_fstab_res = check_swap_in_fstab(module) + swap_is_inuse_res = check_swapon_status(module) + + if swap_fstab_res: + comment_swap_fstab(module) + changed = True + + if swap_is_inuse_res: + run_swapoff(module, changed) + changed = True + + result = {'changed': changed} + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/nuage_master/tasks/etcd_certificates.yml b/roles/nuage_master/tasks/etcd_certificates.yml new file mode 100644 index 000000000..99ec27f91 --- /dev/null +++ b/roles/nuage_master/tasks/etcd_certificates.yml @@ -0,0 +1,21 @@ +--- +- name: Generate openshift etcd certs + become: yes + include_role: + name: etcd + tasks_from: client_certificates + vars: + etcd_cert_prefix: nuageEtcd- + etcd_cert_config_dir: "{{ cert_output_dir }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_cert_subdir: "openshift-nuage-{{ openshift.common.hostname }}" + + +- name: Error if etcd certs are not copied + stat: + path: "{{ item }}" + with_items: + - "{{ cert_output_dir 
}}/nuageEtcd-ca.crt" + - "{{ cert_output_dir }}/nuageEtcd-client.crt" + - "{{ cert_output_dir }}/nuageEtcd-client.key" diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml index 29e16b6f8..a1781dc56 100644 --- a/roles/nuage_master/tasks/main.yaml +++ b/roles/nuage_master/tasks/main.yaml @@ -81,6 +81,7 @@ - nuage.key - nuage.kubeconfig +- include_tasks: etcd_certificates.yml - include_tasks: certificates.yml - name: Install Nuage VSD user certificate @@ -99,7 +100,16 @@ become: yes template: src=nuage-node-config-daemonset.j2 dest=/etc/nuage-node-config-daemonset.yaml owner=root mode=0644 -- name: Add the service account to the privileged scc to have root permissions +- name: Create Nuage Infra Pod daemon set yaml file + become: yes + template: src=nuage-infra-pod-config-daemonset.j2 dest=/etc/nuage-infra-pod-config-daemonset.yaml owner=root mode=0644 + +- name: Add the service account to the privileged scc to have root permissions for kube-system + shell: oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:daemon-set-controller + ignore_errors: true + when: inventory_hostname == groups.oo_first_master.0 + +- name: Add the service account to the privileged scc to have root permissions for openshift-infra shell: oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-infra:daemonset-controller ignore_errors: true when: inventory_hostname == groups.oo_first_master.0 @@ -114,6 +124,11 @@ ignore_errors: true when: inventory_hostname == groups.oo_first_master.0 +- name: Spawn Nuage Infra daemon sets pod + shell: oc create -f /etc/nuage-infra-pod-config-daemonset.yaml + ignore_errors: true + when: inventory_hostname == groups.oo_first_master.0 + - name: Restart daemons command: /bin/true notify: diff --git a/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 new file mode 100755 index 000000000..534a1517f --- /dev/null +++ b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 @@ -0,0 +1,39 @@ +# This manifest installs Nuage Infra pod on +# each worker node in an Openshift cluster. 
+kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: nuage-infra-ds + namespace: kube-system + labels: + k8s-app: nuage-infra-ds +spec: + selector: + matchLabels: + k8s-app: nuage-infra-ds + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: nuage-infra-ds + spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + operator: Exists + containers: + # This container spawns a Nuage Infra pod + # on each worker node + - name: install-nuage-infra + image: nuage/infra:{{ nuage_infra_container_image_version }} + command: ["/install-nuage-infra-pod.sh"] + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/log + name: log-dir + volumes: + - name: log-dir + hostPath: + path: /var/log diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 index 7be5d6743..3543eeb56 100755 --- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 +++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 @@ -37,11 +37,14 @@ data: nuageMonServer: URL: 0.0.0.0:9443 certificateDirectory: {{ nuage_master_crt_dir }} + clientCA: "" + serverCertificate: "" + serverKey: "" # etcd config required for HA etcdClientConfig: - ca: {{ nuage_master_crt_dir }}/nuageMonCA.crt - certFile: {{ nuage_master_crt_dir }}/nuageMonServer.crt - keyFile: {{ nuage_master_crt_dir }}/master.etcd-client.key + ca: {{ nuage_master_crt_dir }}/nuageEtcd-ca.crt + certFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.crt + keyFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.key urls: {% for etcd_url in openshift.master.etcd_urls %} - {{ etcd_url }} diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 index 6a1267d94..996a2d2b0 100755 --- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 +++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 @@ -61,6 +61,8 @@ spec: selector: matchLabels: k8s-app: nuage-cni-ds + updateStrategy: + type: RollingUpdate template: metadata: labels: @@ -104,6 +106,8 @@ spec: - mountPath: /var/log name: cni-log-dir - mountPath: {{ nuage_node_config_dsets_mount_dir }} + name: var-usr-share-dir + - mountPath: /usr/share/ name: usr-share-dir volumes: - name: cni-bin-dir @@ -121,9 +125,12 @@ spec: - name: cni-log-dir hostPath: path: /var/log - - name: usr-share-dir + - name: var-usr-share-dir hostPath: path: {{ nuage_node_config_dsets_mount_dir }} + - name: usr-share-dir + hostPath: + path: /usr/share/ --- @@ -164,7 +171,7 @@ spec: - name: NUAGE_PLATFORM value: '"kvm, k8s"' - name: NUAGE_K8S_SERVICE_IPV4_SUBNET - value: '192.168.0.0\/16' + value: '172.30.0.0\/16' - name: NUAGE_NETWORK_UPLINK_INTF value: "eth0" volumeMounts: diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml index 114514d7c..5045e1cc5 100644 --- a/roles/nuage_master/vars/main.yaml +++ b/roles/nuage_master/vars/main.yaml @@ -26,9 +26,10 @@ nuage_master_config_dsets_mount_dir: /usr/share/ nuage_node_config_dsets_mount_dir: /usr/share/ nuage_cni_bin_dsets_mount_dir: /opt/cni/bin nuage_cni_netconf_dsets_mount_dir: /etc/cni/net.d -nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.1.1') }}" -nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.1.1') }}" -nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.1.1') }}" +nuage_monitor_container_image_version: "{{ 
nuage_monitor_image_version | default('v5.2.1') }}" +nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.2.1') }}" +nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.2.1') }}" +nuage_infra_container_image_version: "{{ nuage_infra_image_version | default('v5.2.1') }}" api_server_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" nuage_vport_mtu: "{{ nuage_interface_mtu | default('1460') }}" master_host_type: "{{ master_base_host_type | default('is_rhel_server') }}" diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 8c8227b5e..178e0849c 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -42,68 +42,93 @@ openshift_aws_ami_tags: openshift_aws_s3_mode: create openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry" -openshift_aws_elb_health_check: - ping_protocol: tcp - ping_port: 443 - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 2 - openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}" -openshift_aws_elb_name_dict: - master: - external: "{{ openshift_aws_elb_basename }}-master-external" - internal: "{{ openshift_aws_elb_basename }}-master-internal" - infra: - external: "{{ openshift_aws_elb_basename }}-infra" - -openshift_aws_elb_idle_timout: 400 -openshift_aws_elb_scheme: internet-facing -openshift_aws_elb_cert_arn: '' openshift_aws_elb_dict: master: external: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: ssl - instance_port: 443 - - protocol: ssl - load_balancer_port: 443 - instance_protocol: ssl - instance_port: 443 - # ssl certificate required for https or ssl - ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}" + cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: "{{ openshift_master_api_port | default(8443) }}" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: ssl + instance_port: "{{ openshift_master_api_port | default(8443) }}" + - protocol: ssl + load_balancer_port: "{{ openshift_master_api_port | default(8443) }}" + instance_protocol: ssl + instance_port: "{{ openshift_master_api_port | default(8443) }}" + ssl_certificate_id: '' + name: "{{ openshift_aws_elb_basename }}-master-external" + tags: "{{ openshift_aws_kube_tags }}" internal: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: tcp - instance_port: 80 - - protocol: tcp - load_balancer_port: 443 - instance_protocol: tcp - instance_port: 443 + cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: "{{ openshift_master_api_port | default(8443) }}" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 80 + - protocol: tcp + load_balancer_port: "{{ openshift_master_api_port | default(8443) }}" + instance_protocol: tcp + instance_port: "{{ openshift_master_api_port | default(8443) }}" + name: "{{ openshift_aws_elb_basename }}-master-internal" + tags: "{{ openshift_aws_kube_tags }}" infra: external: - - protocol: tcp - load_balancer_port: 80 - instance_protocol: tcp - instance_port: 443 - proxy_protocol: True - - protocol: tcp - load_balancer_port: 443 - instance_protocol: tcp - instance_port: 443 - proxy_protocol: True + 
cross_az_load_balancing: False + health_check: + ping_protocol: tcp + ping_port: 443 + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 2 + idle_timout: 400 + listeners: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True + - protocol: tcp + load_balancer_port: 443 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True + name: "{{ openshift_aws_elb_basename }}-infra" + tags: "{{ openshift_aws_kube_tags }}" openshift_aws_node_group_config_master_volumes: +- device_name: /dev/sda1 + volume_size: 100 + device_type: gp2 + delete_on_termination: False - device_name: /dev/sdb volume_size: 100 device_type: gp2 delete_on_termination: False openshift_aws_node_group_config_node_volumes: +- device_name: /dev/sda1 + volume_size: 100 + device_type: gp2 + delete_on_termination: True - device_name: /dev/sdb volume_size: 100 device_type: gp2 @@ -164,7 +189,7 @@ openshift_aws_master_group_config: iam_role: "{{ openshift_aws_iam_role_name }}" policy_name: "{{ openshift_aws_iam_role_policy_name }}" policy_json: "{{ openshift_aws_iam_role_policy_json }}" - elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}" + elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}" openshift_aws_node_group_config: # The 'compute' key is always required here. @@ -197,10 +222,7 @@ openshift_aws_node_group_config: iam_role: "{{ openshift_aws_iam_role_name }}" policy_name: "{{ openshift_aws_iam_role_policy_name }}" policy_json: "{{ openshift_aws_iam_role_policy_json }}" - elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}" - -openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}" -openshift_aws_elb_az_load_balancing: False + elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}" # build_instance_tags is a custom filter in role lib_utils openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" @@ -245,8 +267,8 @@ openshift_aws_node_security_groups: to_port: 80 cidr_ip: 0.0.0.0/0 - proto: tcp - from_port: 443 - to_port: 443 + from_port: "{{ openshift_master_api_port | default(8443) }}" + to_port: "{{ openshift_master_api_port | default(8443) }}" cidr_ip: 0.0.0.0/0 compute: name: "{{ openshift_aws_clusterid }}_compute" @@ -260,8 +282,8 @@ openshift_aws_node_security_groups: to_port: 80 cidr_ip: 0.0.0.0/0 - proto: tcp - from_port: 443 - to_port: 443 + from_port: "{{ openshift_master_api_port | default(8443) }}" + to_port: "{{ openshift_master_api_port | default(8443) }}" cidr_ip: 0.0.0.0/0 - proto: tcp from_port: 30000 @@ -274,8 +296,6 @@ openshift_aws_node_security_groups: openshift_aws_vpc_tags: Name: "{{ openshift_aws_vpc_name }}" -openshift_aws_subnet_az: us-east-1c - openshift_aws_vpc: name: "{{ openshift_aws_vpc_name }}" cidr: 172.31.0.0/16 @@ -283,13 +303,20 @@ openshift_aws_vpc: us-east-1: - cidr: 172.31.48.0/20 az: "us-east-1c" + default_az: true - cidr: 172.31.32.0/20 az: "us-east-1e" - cidr: 172.31.16.0/20 az: "us-east-1a" +openshift_aws_subnet_az: "{{ openshift_aws_vpc.subnets[openshift_aws_region] | get_default_az }}" + openshift_aws_node_run_bootstrap_startup: True openshift_aws_node_user_data: '' openshift_aws_node_config_namespace: openshift-node openshift_aws_masters_groups: masters,etcd,nodes + +# By default, don't delete things like the shared IAM instance +# profile and uploaded ssh keys 
+openshift_aws_enable_uninstall_shared_objects: False diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml index c2a2cea30..db30fe5c9 100644 --- a/roles/openshift_aws/tasks/accept_nodes.yml +++ b/roles/openshift_aws/tasks/accept_nodes.yml @@ -1,4 +1,6 @@ --- +- include_tasks: setup_master_group.yml + - name: fetch masters ec2_instance_facts: region: "{{ openshift_aws_region | default('us-east-1') }}" @@ -36,4 +38,4 @@ nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" timeout: 60 register: nodeout - delegate_to: "{{ mastersout.instances[0].public_ip_address }}" + delegate_to: "{{ groups.masters.0 }}" diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml index 5d371ec7a..d8257cf31 100644 --- a/roles/openshift_aws/tasks/elb.yml +++ b/roles/openshift_aws/tasks/elb.yml @@ -5,18 +5,18 @@ - name: "Create ELB {{ l_elb_dict_item.key }}" ec2_elb_lb: - name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}" + name: "{{ item.value.name }}" state: present - cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}" + cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}" security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}" - idle_timeout: "{{ openshift_aws_elb_idle_timout }}" + idle_timeout: "{{ item.value.idle_timout }}" region: "{{ openshift_aws_region }}" subnets: - "{{ subnetout.subnets[0].id }}" - health_check: "{{ openshift_aws_elb_health_check }}" - listeners: "{{ item.value }}" - scheme: "{{ openshift_aws_elb_scheme }}" - tags: "{{ openshift_aws_elb_tags }}" + health_check: "{{ item.value.health_check }}" + listeners: "{{ item.value.listeners }}" + scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}" + tags: "{{ item.value.tags }}" wait: True register: new_elb with_dict: "{{ l_elb_dict_item.value }}" diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml index 530b0134d..c2e362acd 100644 --- a/roles/openshift_aws/tasks/master_facts.yml +++ b/roles/openshift_aws/tasks/master_facts.yml @@ -3,7 +3,7 @@ ec2_elb_facts: region: "{{ openshift_aws_region }}" names: - - "{{ openshift_aws_elb_name_dict['master']['internal'] }}" + - "{{ openshift_aws_elb_dict['master']['internal']['name'] }}" delegate_to: localhost register: elbs diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index 786a2e4cf..2b5f317d8 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -1,23 +1,6 @@ --- -- when: openshift_aws_create_iam_cert | bool - name: create the iam_cert for elb certificate - include_tasks: iam_cert.yml - -- when: openshift_aws_create_s3 | bool - name: create s3 bucket for registry - include_tasks: s3.yml - - include_tasks: vpc_and_subnet_id.yml -- name: create elbs - include_tasks: elb.yml - with_dict: "{{ openshift_aws_elb_dict }}" - vars: - l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}" - l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}" - loop_control: - loop_var: l_elb_dict_item - - name: include scale group creation for master include_tasks: build_node_group.yml with_items: "{{ openshift_aws_master_group }}" diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml new file mode 100644 index 000000000..fcc49c3ea --- /dev/null +++ b/roles/openshift_aws/tasks/provision_elb.yml @@ -0,0 +1,14 @@ +--- 
+- when: openshift_aws_create_iam_cert | bool + name: create the iam_cert for elb certificate + include_tasks: iam_cert.yml + +- include_tasks: vpc_and_subnet_id.yml + +- name: create elbs + include_tasks: elb.yml + with_dict: "{{ openshift_aws_elb_dict }}" + vars: + l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}" + loop_control: + loop_var: l_elb_dict_item diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml index d82f18574..9105b5b4c 100644 --- a/roles/openshift_aws/tasks/provision_nodes.yml +++ b/roles/openshift_aws/tasks/provision_nodes.yml @@ -2,25 +2,12 @@ # Get bootstrap config token # bootstrap should be created on first master # need to fetch it and shove it into cloud data -- name: fetch master instances - ec2_instance_facts: - region: "{{ openshift_aws_region }}" - filters: - "tag:clusterid": "{{ openshift_aws_clusterid }}" - "tag:host-type": master - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: - - "'instances' in instancesout" - - instancesout.instances|length > 0 +- include_tasks: setup_master_group.yml - name: slurp down the bootstrap.kubeconfig slurp: src: /etc/origin/master/bootstrap.kubeconfig - delegate_to: "{{ instancesout.instances[0].public_ip_address }}" - remote_user: root + delegate_to: "{{ groups.masters.0 }}" register: bootstrap - name: set_fact for kubeconfig token diff --git a/roles/openshift_aws/tasks/uninstall_security_group.yml b/roles/openshift_aws/tasks/uninstall_security_group.yml new file mode 100644 index 000000000..55d40e8ec --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_security_group.yml @@ -0,0 +1,14 @@ +--- +- name: delete the node group sgs + oo_ec2_group: + state: absent + name: "{{ item.value.name}}" + region: "{{ openshift_aws_region }}" + with_dict: "{{ openshift_aws_node_security_groups }}" + +- name: delete the k8s sgs for the node group + oo_ec2_group: + state: absent + name: "{{ item.value.name }}_k8s" + region: "{{ openshift_aws_region }}" + with_dict: "{{ openshift_aws_node_security_groups }}" diff --git a/roles/openshift_aws/tasks/uninstall_ssh_keys.yml b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml new file mode 100644 index 000000000..27e42da53 --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml @@ -0,0 +1,9 @@ +--- +- name: Remove the public keys for the user(s) + ec2_key: + state: absent + name: "{{ item.key_name }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ openshift_aws_users }}" + no_log: True + when: openshift_aws_enable_uninstall_shared_objects | bool diff --git a/roles/openshift_aws/tasks/uninstall_vpc.yml b/roles/openshift_aws/tasks/uninstall_vpc.yml new file mode 100644 index 000000000..ecf39f694 --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_vpc.yml @@ -0,0 +1,36 @@ +--- +- name: Fetch the VPC for the vpc.id + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_clusterid }}" + register: vpcout +- debug: + var: vpcout + verbosity: 1 + +- when: vpcout.vpcs | length > 0 + block: + - name: delete the vpc igw + ec2_vpc_igw: + state: absent + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + register: igw + + - name: delete the vpc subnets + ec2_vpc_subnet: + state: absent + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + cidr: "{{ item.cidr }}" + az: "{{ item.az }}" + with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}" + + - name: Delete AWS VPC + 
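provision_elb.yml and elb.yml above walk openshift_aws_elb_dict as a nested dictionary: the include loops over the master/infra keys with loop_var l_elb_dict_item, and elb.yml then loops over each scheme entry, taking name, listeners, health_check, idle_timout, cross_az_load_balancing and tags from item.value; the defaults meanwhile pull the same name fields back out with json_query to fill the scale groups' elbs lists. A small debug task that mirrors the inner loop, offered only as an illustration of the data shape:

- name: show which ELBs elb.yml would create for this group (illustrative)
  debug:
    msg: "{{ l_elb_dict_item.key }}/{{ item.key }} -> {{ item.value.name }}"
  with_dict: "{{ l_elb_dict_item.value }}"
  # for the master pass this prints the external and internal entries with
  # their ELB names; the infra pass only has an external entry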
ec2_vpc_net: + state: absent + region: "{{ openshift_aws_region }}" + name: "{{ openshift_aws_clusterid }}" + cidr_block: "{{ openshift_aws_vpc.cidr }}" + register: vpc diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml new file mode 100644 index 000000000..90ee40943 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml @@ -0,0 +1,10 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: bootstrap-autoapprover +roleRef: + kind: ClusterRole + name: system:node-bootstrap-autoapprover +subjects: +- kind: User + name: system:serviceaccount:openshift-infra:bootstrap-autoapprover diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml new file mode 100644 index 000000000..d8143d047 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: system:node-bootstrap-autoapprover +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - delete + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - create + - update diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml new file mode 100644 index 000000000..e22ce6f34 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: bootstrap-autoapprover + namespace: openshift-infra diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml new file mode 100644 index 000000000..dbcedb407 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml @@ -0,0 +1,68 @@ +kind: StatefulSet +apiVersion: apps/v1beta1 +metadata: + name: bootstrap-autoapprover + namespace: openshift-infra +spec: + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: bootstrap-autoapprover + spec: + serviceAccountName: bootstrap-autoapprover + terminationGracePeriodSeconds: 1 + containers: + - name: signer + image: openshift/node:v3.7.0-rc.0 + command: + - /bin/bash + - -c + args: + - | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + unset KUBECONFIG + cat <<SCRIPT > /tmp/signer + #!/bin/bash + # + # It will approve any CSR that is not approved yet, and delete any CSR that expired more than 60 seconds + # ago. + # + + set -o errexit + set -o nounset + set -o pipefail + + name=\${1} + condition=\${2} + certificate=\${3} + username=\${4} + + # auto approve + if [[ -z "\${condition}" && ("\${username}" == "system:serviceaccount:openshift-infra:node-bootstrapper" || "\${username}" == "system:node:"* ) ]]; then + oc adm certificate approve "\${name}" + exit 0 + fi + + # check certificate age + if [[ -n "\${certificate}" ]]; then + text="\$( echo "\${certificate}" | base64 -d - )" + if ! 
echo "\${text}" | openssl x509 -noout; then + echo "error: Unable to parse certificate" 2>&1 + exit 1 + fi + if ! echo "\${text}" | openssl x509 -checkend -60 > /dev/null; then + echo "Certificate is expired, deleting" + oc delete csr "\${name}" + fi + exit 0 + fi + SCRIPT + chmod u+x /tmp/signer + + exec oc observe csr --maximum-errors=1 --resync-period=10m -a '{.status.conditions[*].type}' -a '{.status.certificate}' -a '{.spec.username}' -- /tmp/signer diff --git a/roles/openshift_bootstrap_autoapprover/tasks/main.yml b/roles/openshift_bootstrap_autoapprover/tasks/main.yml new file mode 100644 index 000000000..88e9d08e7 --- /dev/null +++ b/roles/openshift_bootstrap_autoapprover/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: Copy auto-approver config to host + run_once: true + copy: + src: "{{ item }}" + dest: /tmp/openshift-approver/ + owner: root + mode: 0400 + with_fileglob: + - "*.yaml" + +- name: Set auto-approver nodeSelector + run_once: true + yedit: + src: "/tmp/openshift-approver/openshift-bootstrap-controller.yaml" + key: spec.template.spec.nodeSelector + value: "{{ openshift_master_bootstrap_auto_approver_node_selector | default({}) }}" + value_type: list + +- name: Create auto-approver on cluster + run_once: true + command: oc apply -f /tmp/openshift-approver/ + +- name: Remove auto-approver config + run_once: true + file: + path: /tmp/openshift-approver/ + state: absent diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index b94cd9fba..9c8534c74 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -19,7 +19,8 @@ - name: Reload generated facts openshift_facts: - when: hostvars[openshift_ca_host].install_result is changed + when: + - hostvars[openshift_ca_host].install_result | default({'changed':false}) is changed - name: Create openshift_ca_config_dir if it does not exist file: diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml index ee4048911..395bd304c 100644 --- a/roles/openshift_cloud_provider/tasks/gce.yml +++ b/roles/openshift_cloud_provider/tasks/gce.yml @@ -13,5 +13,11 @@ ini_file: dest: "{{ openshift.common.config_base }}/cloudprovider/gce.conf" section: Global - option: multizone - value: "true" + option: "{{ item.key }}" + value: "{{ item.value }}" + with_items: + - { key: 'project-id', value: '{{ openshift_gcp_project }}' } + - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } + - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } + - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } + - { key: 'multizone', value: 'false' } diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml new file mode 100644 index 000000000..48d1d4e26 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-backup +spec: + template: + metadata: + name: cloudforms-backup + spec: + containers: + - name: postgresql + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/backup_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: 
+ claimName: cloudforms-backup + restartPolicy: Never diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml new file mode 100644 index 000000000..92598ce82 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cloudforms-backup +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 15Gi diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml new file mode 100644 index 000000000..4fe349897 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv03 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: "/exports/cfme-pv03" + server: "<your-nfs-host-here>" + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml index 250a99b8d..0cdd821b5 100644 --- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml @@ -1,13 +1,38 @@ apiVersion: v1 -kind: PersistentVolume +kind: Template +labels: + template: cloudforms-db-pv metadata: - name: cfme-pv01 -spec: - capacity: - storage: 15Gi - accessModes: + name: cloudforms-db-pv + annotations: + description: PV Template for CFME PostgreSQL DB + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-db + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: - ReadWriteOnce - nfs: - path: /exports/cfme-pv01 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain + nfs: + path: "${BASE_PATH}/cfme-db" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for DB + required: true + description: The size of the CFME DB PV given in Gi + value: 15Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml deleted file mode 100644 index cba9bbe35..000000000 --- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: cfme-pv02 -spec: - capacity: - storage: 5Gi - accessModes: - - ReadWriteOnce - nfs: - path: /exports/cfme-pv02 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml index c08c21265..527090ae8 100644 --- 
a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml @@ -1,13 +1,38 @@ apiVersion: v1 -kind: PersistentVolume +kind: Template +labels: + template: cloudforms-app-pv metadata: - name: cfme-pv03 -spec: - capacity: - storage: 5Gi - accessModes: + name: cloudforms-app-pv + annotations: + description: PV Template for CFME Server + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-app + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: - ReadWriteOnce - nfs: - path: /exports/cfme-pv03 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain + nfs: + path: "${BASE_PATH}/cfme-app" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for App + required: true + description: The size of the CFME APP PV given in Gi + value: 5Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml new file mode 100644 index 000000000..7fd4fc2e1 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-restore +spec: + template: + metadata: + name: cloudforms-restore + spec: + containers: + - name: postgresql + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/restore_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + - name: BACKUP_VERSION + value: latest + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + - name: cfme-prod-vol + mountPath: "/restore" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + - name: cfme-prod-vol + persistentVolumeClaim: + claimName: cloudforms-postgresql + restartPolicy: Never diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml new file mode 100644 index 000000000..d2ece9298 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml @@ -0,0 +1,38 @@ +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: v1 +defaultAddCapabilities: +- SYS_ADMIN +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required scc for Pods requiring to run with systemd and the message bus. 
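Nothing in the template files binds the cfme-sysadmin SecurityContextConstraints above to a service account; beyond the system:cluster-admins group it lists, an administrator has to grant it to whichever CloudForms account actually runs systemd and D-Bus. A hedged example task; the cfme-httpd account and the project name are assumptions, not something this diff sets up:

- name: grant the cfme-sysadmin SCC to the httpd service account (illustrative)
  command: oc adm policy add-scc-to-user cfme-sysadmin -z cfme-httpd -n openshift-cfme
  # -z resolves the service account in the given project; adjust both to your deployment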
+ creationTimestamp: + name: cfme-sysadmin +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +- SYS_CHROOT +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- secret diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml new file mode 100644 index 000000000..9866c29c3 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml @@ -0,0 +1,956 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-ext-db +metadata: + name: cloudforms-ext-db + annotations: + description: CloudForms appliance with persistent storage using a external DB host + tags: instant-app,cloudforms,cfme + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances CloudForms pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - name: cloudforms + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + 
secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: 
"${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Remote database service + spec: + ports: + - name: postgresql + port: 5432 + targetPort: "${{DATABASE_PORT}}" + selector: {} +- apiVersion: v1 + kind: Endpoints + metadata: + name: "${DATABASE_SERVICE_NAME}" + subsets: + - addresses: + - ip: "${DATABASE_IP}" + ports: + - port: "${{DATABASE_PORT}}" + name: postgresql +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. 
+ Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + + ProxyPreserveHost on + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must by served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] + ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common + </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + 
LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-kerberos-realms: undefined + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: 
ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_IP + displayName: PostgreSQL Server IP + required: true + description: PostgreSQL external server IP used to configure service. + value: '' +- name: DATABASE_PORT + displayName: PostgreSQL Server Port + required: true + description: PostgreSQL external server port used to configure service. + value: '5432' +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible continer + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. 
+ value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. 
+ value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. + value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache httpd Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. 
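The parameter list of the cloudforms-ext-db template continues just below with the httpd sizing knobs; in practice DATABASE_IP is the one parameter that has to be supplied, since everything else either has a usable default or is generated. A hedged example of instantiating the template once it has been loaded into the project (the concrete values are made up):

- name: instantiate the external-DB CloudForms template (illustrative values)
  command: >
    oc new-app --template=cloudforms-ext-db
    -p DATABASE_IP=10.0.0.5
    -p APPLICATION_DOMAIN=cloudforms.apps.example.com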
+ value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml index 3bc6c5813..5c757b6c2 100644 --- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml @@ -5,17 +5,308 @@ labels: metadata: name: cloudforms annotations: - description: "CloudForms appliance with persistent storage" - tags: "instant-app,cloudforms,cfme" - iconClass: "icon-rails" + description: CloudForms appliance with persistent storage + tags: instant-app,cloudforms,cfme + iconClass: icon-rails objects: - apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${DATABASE_SERVICE_NAME}-configs" + data: + 01_miq_overrides.conf: | + #------------------------------------------------------------------------------ + # CONNECTIONS AND AUTHENTICATION + #------------------------------------------------------------------------------ + + tcp_keepalives_count = 9 + tcp_keepalives_idle = 3 + tcp_keepalives_interval = 75 + + #------------------------------------------------------------------------------ + # RESOURCE USAGE (except WAL) + #------------------------------------------------------------------------------ + + shared_preload_libraries = 'pglogical,repmgr_funcs' + max_worker_processes = 10 + + #------------------------------------------------------------------------------ + # WRITE AHEAD LOG + #------------------------------------------------------------------------------ + + wal_level = 'logical' + wal_log_hints = on + wal_buffers = 16MB + checkpoint_completion_target = 0.9 + + #------------------------------------------------------------------------------ + # REPLICATION + #------------------------------------------------------------------------------ + + max_wal_senders = 10 + wal_sender_timeout = 0 + max_replication_slots = 10 + hot_standby = on + + #------------------------------------------------------------------------------ + # ERROR REPORTING AND LOGGING + #------------------------------------------------------------------------------ + + log_filename = 'postgresql.log' + log_rotation_age = 0 + log_min_duration_statement = 5000 + log_connections = on + log_disconnections = on + log_line_prefix = '%t:%r:%c:%u@%d:[%p]:' + log_lock_waits = on + + #------------------------------------------------------------------------------ + # AUTOVACUUM PARAMETERS + 
#------------------------------------------------------------------------------ + + log_autovacuum_min_duration = 0 + autovacuum_naptime = 5min + autovacuum_vacuum_threshold = 500 + autovacuum_analyze_threshold = 500 + autovacuum_vacuum_scale_factor = 0.05 + + #------------------------------------------------------------------------------ + # LOCK MANAGEMENT + #------------------------------------------------------------------------------ + + deadlock_timeout = 5s + + #------------------------------------------------------------------------------ + # VERSION/PLATFORM COMPATIBILITY + #------------------------------------------------------------------------------ + + escape_string_warning = off + standard_conforming_strings = off +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + + ProxyPreserveHost on + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must by served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] + ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common + </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + 
MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-kerberos-realms: undefined + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see 
https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 kind: Service metadata: annotations: - description: "Exposes and load balances CloudForms pods" + description: Exposes and load balances CloudForms pods service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' - name: ${NAME} + name: "${NAME}" spec: clusterIP: None ports: @@ -23,141 +314,97 @@ objects: port: 80 protocol: TCP targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 selector: - name: ${NAME} + name: "${NAME}" - apiVersion: v1 kind: Route metadata: - name: ${NAME} + name: "${HTTPD_SERVICE_NAME}" spec: - host: ${APPLICATION_DOMAIN} + host: "${APPLICATION_DOMAIN}" port: - targetPort: https + targetPort: http tls: - termination: passthrough + termination: edge + insecureEdgeTerminationPolicy: Redirect to: kind: Service - name: ${NAME} -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-app - annotations: - description: "Keeps track of changes in the CloudForms app image" - spec: - dockerImageRepository: "${APPLICATION_IMG_NAME}" -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-postgresql - annotations: - description: "Keeps track of changes in the CloudForms postgresql image" - spec: - dockerImageRepository: "${POSTGRESQL_IMG_NAME}" -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-memcached - annotations: - description: "Keeps track of changes in the CloudForms memcached image" - spec: - dockerImageRepository: "${MEMCACHED_IMG_NAME}" + name: "${HTTPD_SERVICE_NAME}" - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: "${NAME}-${DATABASE_SERVICE_NAME}" spec: accessModes: - - ReadWriteOnce + - ReadWriteOnce resources: requests: - storage: ${DATABASE_VOLUME_CAPACITY} -- apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: "${NAME}-region" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${APPLICATION_REGION_VOLUME_CAPACITY} + storage: "${DATABASE_VOLUME_CAPACITY}" - apiVersion: apps/v1beta1 - kind: "StatefulSet" + kind: StatefulSet metadata: - name: ${NAME} + name: "${NAME}" annotations: - description: "Defines how to deploy the CloudForms appliance" + description: Defines how to deploy the CloudForms appliance spec: serviceName: "${NAME}" - replicas: 1 + replicas: "${APPLICATION_REPLICA_COUNT}" template: metadata: labels: - name: ${NAME} - name: ${NAME} + name: "${NAME}" + name: "${NAME}" spec: containers: - name: cloudforms - image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" livenessProbe: - tcpSocket: - port: 443 + exec: + command: + - pidof + - MIQ Server initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: - httpGet: - path: / - port: 443 - scheme: HTTPS + tcpSocket: + port: 80 initialDelaySeconds: 200 timeoutSeconds: 3 ports: - containerPort: 80 protocol: TCP - - containerPort: 443 - protocol: TCP - securityContext: - privileged: true volumeMounts: - - - name: "${NAME}-server" - mountPath: "/persistent" - - - name: "${NAME}-region" - mountPath: "/persistent-region" + - name: "${NAME}-server" + mountPath: "/persistent" env: - - - name: "APPLICATION_INIT_DELAY" - value: "${APPLICATION_INIT_DELAY}" - - - name: "DATABASE_SERVICE_NAME" - value: "${DATABASE_SERVICE_NAME}" - - - name: "DATABASE_REGION" - value: 
"${DATABASE_REGION}" - - - name: "MEMCACHED_SERVICE_NAME" - value: "${MEMCACHED_SERVICE_NAME}" - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - value: "${DATABASE_PASSWORD}" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password resources: requests: memory: "${APPLICATION_MEM_REQ}" @@ -168,59 +415,128 @@ objects: preStop: exec: command: - - /opt/rh/cfme-container-scripts/sync-pv-data - volumes: - - - name: "${NAME}-region" - persistentVolumeClaim: - claimName: ${NAME}-region + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 volumeClaimTemplates: - - metadata: - name: "${NAME}-server" - annotations: - # Uncomment this if using dynamic volume provisioning. - # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html - # volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ ReadWriteOnce ] + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password resources: requests: - storage: "${APPLICATION_VOLUME_CAPACITY}" + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" 
+ lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" - apiVersion: v1 - kind: "Service" + kind: Service metadata: name: "${MEMCACHED_SERVICE_NAME}" annotations: - description: "Exposes the memcached server" + description: Exposes the memcached server spec: ports: - - - name: "memcached" - port: 11211 - targetPort: 11211 + - name: memcached + port: 11211 + targetPort: 11211 selector: name: "${MEMCACHED_SERVICE_NAME}" - apiVersion: v1 - kind: "DeploymentConfig" + kind: DeploymentConfig metadata: name: "${MEMCACHED_SERVICE_NAME}" annotations: - description: "Defines how to deploy memcached" + description: Defines how to deploy memcached spec: strategy: - type: "Recreate" + type: Recreate triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "memcached" - from: - kind: "ImageStreamTag" - name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}" - - - type: "ConfigChange" + - type: ConfigChange replicas: 1 selector: name: "${MEMCACHED_SERVICE_NAME}" @@ -232,74 +548,58 @@ objects: spec: volumes: [] containers: - - - name: "memcached" - image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" - ports: - - - containerPort: 11211 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 5 - tcpSocket: - port: 11211 - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 30 - tcpSocket: - port: 11211 - volumeMounts: [] - env: - - - name: "MEMCACHED_MAX_MEMORY" - value: "${MEMCACHED_MAX_MEMORY}" - - - name: "MEMCACHED_MAX_CONNECTIONS" - value: "${MEMCACHED_MAX_CONNECTIONS}" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - value: "${MEMCACHED_SLAB_PAGE_SIZE}" - resources: - requests: - memory: "${MEMCACHED_MEM_REQ}" - cpu: "${MEMCACHED_CPU_REQ}" - limits: - memory: "${MEMCACHED_MEM_LIMIT}" + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" - apiVersion: v1 - kind: "Service" + kind: Service metadata: name: "${DATABASE_SERVICE_NAME}" annotations: - description: "Exposes the database server" + description: Exposes the database server spec: ports: - - - name: "postgresql" - port: 5432 - targetPort: 5432 + - name: postgresql + port: 5432 + targetPort: 5432 selector: name: "${DATABASE_SERVICE_NAME}" - apiVersion: v1 - kind: "DeploymentConfig" + kind: DeploymentConfig metadata: name: "${DATABASE_SERVICE_NAME}" annotations: - description: "Defines how to deploy the database" + description: Defines how to deploy the database spec: strategy: - type: "Recreate" + type: Recreate triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "postgresql" - from: - kind: "ImageStreamTag" - name: 
"cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}" - - - type: "ConfigChange" + - type: ConfigChange replicas: 1 selector: name: "${DATABASE_SERVICE_NAME}" @@ -310,236 +610,524 @@ objects: name: "${DATABASE_SERVICE_NAME}" spec: volumes: - - - name: "cfme-pgdb-volume" - persistentVolumeClaim: - claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pgdb-volume + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pg-configs + configMap: + name: "${DATABASE_SERVICE_NAME}-configs" containers: - - - name: "postgresql" - image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" - ports: - - - containerPort: 5432 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 15 + - name: postgresql + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1' + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - name: cfme-pgdb-volume + mountPath: "/var/lib/pgsql/data" + - name: cfme-pg-configs + mountPath: "${POSTGRESQL_CONFIG_DIR}" + env: + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${DATABASE_NAME}" + - name: POSTGRESQL_MAX_CONNECTIONS + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - name: POSTGRESQL_SHARED_BUFFERS + value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: POSTGRESQL_CONFIG_DIR + value: "${POSTGRESQL_CONFIG_DIR}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: 
"${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms + lifecycle: + postStart: exec: command: - - "/bin/sh" - - "-i" - - "-c" - - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 60 - tcpSocket: - port: 5432 - volumeMounts: - - - name: "cfme-pgdb-volume" - mountPath: "/var/lib/pgsql/data" - env: - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - value: "${DATABASE_PASSWORD}" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" - resources: - requests: - memory: "${POSTGRESQL_MEM_REQ}" - cpu: "${POSTGRESQL_CPU_REQ}" - limits: - memory: "${POSTGRESQL_MEM_LIMIT}" - + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd parameters: - - - name: 
"NAME" - displayName: Name - required: true - description: "The name assigned to all of the frontend objects defined in this template." - value: cloudforms - - - name: "DATABASE_SERVICE_NAME" - displayName: "PostgreSQL Service Name" - required: true - description: "The name of the OpenShift Service exposed for the PostgreSQL container." - value: "postgresql" - - - name: "DATABASE_USER" - displayName: "PostgreSQL User" - required: true - description: "PostgreSQL user that will access the database." - value: "root" - - - name: "DATABASE_PASSWORD" - displayName: "PostgreSQL Password" - required: true - description: "Password for the PostgreSQL user." - value: "smartvm" - - - name: "DATABASE_NAME" - required: true - displayName: "PostgreSQL Database Name" - description: "Name of the PostgreSQL database accessed." - value: "vmdb_production" - - - name: "DATABASE_REGION" - required: true - displayName: "Application Database Region" - description: "Database region that will be used for application." - value: "0" - - - name: "MEMCACHED_SERVICE_NAME" - required: true - displayName: "Memcached Service Name" - description: "The name of the OpenShift Service exposed for the Memcached container." - value: "memcached" - - - name: "MEMCACHED_MAX_MEMORY" - displayName: "Memcached Max Memory" - description: "Memcached maximum memory for memcached object storage in MB." - value: "64" - - - name: "MEMCACHED_MAX_CONNECTIONS" - displayName: "Memcached Max Connections" - description: "Memcached maximum number of connections allowed." - value: "1024" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - displayName: "Memcached Slab Page Size" - description: "Memcached size of each slab page." - value: "1m" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - displayName: "PostgreSQL Max Connections" - description: "PostgreSQL maximum number of database connections allowed." - value: "100" - - - name: "POSTGRESQL_SHARED_BUFFERS" - displayName: "PostgreSQL Shared Buffer Amount" - description: "Amount of memory dedicated for PostgreSQL shared memory buffers." - value: "256MB" - - - name: "APPLICATION_CPU_REQ" - displayName: "Application Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." - value: "1000m" - - - name: "POSTGRESQL_CPU_REQ" - displayName: "PostgreSQL Min CPU Requested" - required: true - description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." - value: "500m" - - - name: "MEMCACHED_CPU_REQ" - displayName: "Memcached Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." - value: "200m" - - - name: "APPLICATION_MEM_REQ" - displayName: "Application Min RAM Requested" - required: true - description: "Minimum amount of memory the Application container will need." - value: "6144Mi" - - - name: "POSTGRESQL_MEM_REQ" - displayName: "PostgreSQL Min RAM Requested" - required: true - description: "Minimum amount of memory the PostgreSQL container will need." - value: "1024Mi" - - - name: "MEMCACHED_MEM_REQ" - displayName: "Memcached Min RAM Requested" - required: true - description: "Minimum amount of memory the Memcached container will need." - value: "64Mi" - - - name: "APPLICATION_MEM_LIMIT" - displayName: "Application Max RAM Limit" - required: true - description: "Maximum amount of memory the Application container can consume." 
- value: "16384Mi" - - - name: "POSTGRESQL_MEM_LIMIT" - displayName: "PostgreSQL Max RAM Limit" - required: true - description: "Maximum amount of memory the PostgreSQL container can consume." - value: "8192Mi" - - - name: "MEMCACHED_MEM_LIMIT" - displayName: "Memcached Max RAM Limit" - required: true - description: "Maximum amount of memory the Memcached container can consume." - value: "256Mi" - - - name: "POSTGRESQL_IMG_NAME" - displayName: "PostgreSQL Image Name" - description: "This is the PostgreSQL image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql" - - - name: "POSTGRESQL_IMG_TAG" - displayName: "PostgreSQL Image Tag" - description: "This is the PostgreSQL image tag/version requested to deploy." - value: "latest" - - - name: "MEMCACHED_IMG_NAME" - displayName: "Memcached Image Name" - description: "This is the Memcached image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached" - - - name: "MEMCACHED_IMG_TAG" - displayName: "Memcached Image Tag" - description: "This is the Memcached image tag/version requested to deploy." - value: "latest" - - - name: "APPLICATION_IMG_NAME" - displayName: "Application Image Name" - description: "This is the Application image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app" - - - name: "APPLICATION_IMG_TAG" - displayName: "Application Image Tag" - description: "This is the Application image tag/version requested to deploy." - value: "latest" - - - name: "APPLICATION_DOMAIN" - displayName: "Application Hostname" - description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted." - value: "" - - - name: "APPLICATION_INIT_DELAY" - displayName: "Application Init Delay" - required: true - description: "Delay in seconds before we attempt to initialize the application." - value: "15" - - - name: "APPLICATION_VOLUME_CAPACITY" - displayName: "Application Volume Capacity" - required: true - description: "Volume space available for application data." - value: "5Gi" - - - name: "APPLICATION_REGION_VOLUME_CAPACITY" - displayName: "Application Region Volume Capacity" - required: true - description: "Volume space available for region application data." - value: "5Gi" - - - name: "DATABASE_VOLUME_CAPACITY" - displayName: "Database Volume Capacity" - required: true - description: "Volume space available for database." - value: "15Gi" +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. 
+ value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible container + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: POSTGRESQL_CONFIG_DIR + displayName: PostgreSQL Configuration Overrides + description: Directory used to store PostgreSQL configuration overrides. + value: "/var/lib/pgsql/conf.d" +- name: POSTGRESQL_MAX_CONNECTIONS + displayName: PostgreSQL Max Connections + description: PostgreSQL maximum number of database connections allowed. + value: '1000' +- name: POSTGRESQL_SHARED_BUFFERS + displayName: PostgreSQL Shared Buffer Amount + description: Amount of memory dedicated for PostgreSQL shared memory buffers. + value: 1GB +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: POSTGRESQL_CPU_REQ + displayName: PostgreSQL Min CPU Requested + required: true + description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores). + value: 500m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). 
+ value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: POSTGRESQL_MEM_REQ + displayName: PostgreSQL Min RAM Requested + required: true + description: Minimum amount of memory the PostgreSQL container will need. + value: 4Gi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: POSTGRESQL_MEM_LIMIT + displayName: PostgreSQL Max RAM Limit + required: true + description: Maximum amount of memory the PostgreSQL container can consume. + value: 8Gi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: POSTGRESQL_IMG_NAME + displayName: PostgreSQL Image Name + description: This is the PostgreSQL image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql +- name: POSTGRESQL_IMG_TAG + displayName: PostgreSQL Image Tag + description: This is the PostgreSQL image tag/version requested to deploy. + value: latest +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. + value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. 
+ value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: DATABASE_VOLUME_CAPACITY + displayName: Database Volume Capacity + required: true + description: Volume space available for database. + value: 15Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. 
+ value: 8192Mi diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml new file mode 100644 index 000000000..48d1d4e26 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-backup +spec: + template: + metadata: + name: cloudforms-backup + spec: + containers: + - name: postgresql + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/backup_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + restartPolicy: Never diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml new file mode 100644 index 000000000..92598ce82 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cloudforms-backup +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 15Gi diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml new file mode 100644 index 000000000..4fe349897 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv03 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: "/exports/cfme-pv03" + server: "<your-nfs-host-here>" + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml index 250a99b8d..0cdd821b5 100644 --- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml @@ -1,13 +1,38 @@ apiVersion: v1 -kind: PersistentVolume +kind: Template +labels: + template: cloudforms-db-pv metadata: - name: cfme-pv01 -spec: - capacity: - storage: 15Gi - accessModes: + name: cloudforms-db-pv + annotations: + description: PV Template for CFME PostgreSQL DB + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-db + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: - ReadWriteOnce - nfs: - path: /exports/cfme-pv01 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain + nfs: + path: "${BASE_PATH}/cfme-db" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for DB + required: true + description: The size of the CFME DB PV given in Gi + value: 15Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the 
NFS server diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml deleted file mode 100644 index cba9bbe35..000000000 --- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: cfme-pv02 -spec: - capacity: - storage: 5Gi - accessModes: - - ReadWriteOnce - nfs: - path: /exports/cfme-pv02 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml index c08c21265..527090ae8 100644 --- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml @@ -1,13 +1,38 @@ apiVersion: v1 -kind: PersistentVolume +kind: Template +labels: + template: cloudforms-app-pv metadata: - name: cfme-pv03 -spec: - capacity: - storage: 5Gi - accessModes: + name: cloudforms-app-pv + annotations: + description: PV Template for CFME Server + tags: PVS, CFME +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cfme-app + spec: + capacity: + storage: "${PV_SIZE}" + accessModes: - ReadWriteOnce - nfs: - path: /exports/cfme-pv03 - server: <your-nfs-host-here> - persistentVolumeReclaimPolicy: Retain + nfs: + path: "${BASE_PATH}/cfme-app" + server: "${NFS_HOST}" + persistentVolumeReclaimPolicy: Retain +parameters: +- name: PV_SIZE + displayName: PV Size for App + required: true + description: The size of the CFME APP PV given in Gi + value: 5Gi +- name: BASE_PATH + displayName: Exports Directory Base Path + required: true + description: The parent directory of your NFS exports + value: "/exports" +- name: NFS_HOST + displayName: NFS Server Hostname + required: true + description: The hostname or IP address of the NFS server diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml new file mode 100644 index 000000000..7fd4fc2e1 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cloudforms-restore +spec: + template: + metadata: + name: cloudforms-restore + spec: + containers: + - name: postgresql + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest + command: + - "/opt/rh/cfme-container-scripts/restore_db" + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: cloudforms-secrets + key: database-url + - name: BACKUP_VERSION + value: latest + volumeMounts: + - name: cfme-backup-vol + mountPath: "/backups" + - name: cfme-prod-vol + mountPath: "/restore" + volumes: + - name: cfme-backup-vol + persistentVolumeClaim: + claimName: cloudforms-backup + - name: cfme-prod-vol + persistentVolumeClaim: + claimName: cloudforms-postgresql + restartPolicy: Never diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml new file mode 100644 index 000000000..d2ece9298 --- /dev/null +++ 
b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml @@ -0,0 +1,38 @@ +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: v1 +defaultAddCapabilities: +- SYS_ADMIN +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required scc for Pods requiring to run with systemd and the message bus. + creationTimestamp: + name: cfme-sysadmin +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +- SYS_CHROOT +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- secret diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml new file mode 100644 index 000000000..9866c29c3 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml @@ -0,0 +1,956 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms-ext-db +metadata: + name: cloudforms-ext-db + annotations: + description: CloudForms appliance with persistent storage using a external DB host + tags: instant-app,cloudforms,cfme + iconClass: icon-rails +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances CloudForms pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${NAME}" + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: "${NAME}" +- apiVersion: v1 + kind: Route + metadata: + name: "${HTTPD_SERVICE_NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + name: "${HTTPD_SERVICE_NAME}" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: "${NAME}" + name: "${NAME}" + spec: + containers: + - 
name: cloudforms + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + name: 
"${MEMCACHED_SERVICE_NAME}" + annotations: + description: Exposes the memcached server + spec: + ports: + - name: memcached + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: Defines how to deploy memcached + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: Remote database service + spec: + ports: + - name: postgresql + port: 5432 + targetPort: "${{DATABASE_PORT}}" + selector: {} +- apiVersion: v1 + kind: Endpoints + metadata: + name: "${DATABASE_SERVICE_NAME}" + subsets: + - addresses: + - ip: "${DATABASE_IP}" + ports: + - port: "${{DATABASE_PORT}}" + name: postgresql +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + 
secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + + ProxyPreserveHost on + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must be served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] + ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common + </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + 
MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-kerberos-realms: undefined + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: Service + metadata: + name: 
"${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms + lifecycle: + postStart: + exec: + command: + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd +parameters: +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_IP + displayName: PostgreSQL Server IP + required: true + description: PostgreSQL external server IP used to configure service. + value: '' +- name: DATABASE_PORT + displayName: PostgreSQL Server Port + required: true + description: PostgreSQL external server port used to configure service. + value: '5432' +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. 
+ value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible container + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. + value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. + value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache httpd Configuration Directory + description: Directory used to store the Apache configuration files. 
+ value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml index 3bc6c5813..5c757b6c2 100644 --- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml @@ -5,17 +5,308 @@ labels: metadata: name: cloudforms annotations: - description: "CloudForms appliance with persistent storage" - tags: "instant-app,cloudforms,cfme" - iconClass: "icon-rails" + description: CloudForms appliance with persistent storage + tags: instant-app,cloudforms,cfme + iconClass: icon-rails objects: - apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-orchestrator +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-anyuid +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-privileged +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cfme-httpd +- apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" + database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 + v2-key: "${V2_KEY}" +- apiVersion: v1 + kind: Secret + metadata: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + stringData: + rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}" + secret-key: "${ANSIBLE_SECRET_KEY}" + admin-password: "${ANSIBLE_ADMIN_PASSWORD}" +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${DATABASE_SERVICE_NAME}-configs" + data: + 01_miq_overrides.conf: | + #------------------------------------------------------------------------------ + # CONNECTIONS AND AUTHENTICATION + #------------------------------------------------------------------------------ + + tcp_keepalives_count = 9 + tcp_keepalives_idle = 3 + tcp_keepalives_interval = 75 + + #------------------------------------------------------------------------------ + # RESOURCE USAGE (except WAL) + #------------------------------------------------------------------------------ + + shared_preload_libraries = 'pglogical,repmgr_funcs' + max_worker_processes = 10 + + #------------------------------------------------------------------------------ + # WRITE AHEAD LOG + #------------------------------------------------------------------------------ + + wal_level = 'logical' + wal_log_hints = on + wal_buffers = 16MB + checkpoint_completion_target = 0.9 + + #------------------------------------------------------------------------------ + # REPLICATION + #------------------------------------------------------------------------------ + + max_wal_senders = 10 
+ wal_sender_timeout = 0 + max_replication_slots = 10 + hot_standby = on + + #------------------------------------------------------------------------------ + # ERROR REPORTING AND LOGGING + #------------------------------------------------------------------------------ + + log_filename = 'postgresql.log' + log_rotation_age = 0 + log_min_duration_statement = 5000 + log_connections = on + log_disconnections = on + log_line_prefix = '%t:%r:%c:%u@%d:[%p]:' + log_lock_waits = on + + #------------------------------------------------------------------------------ + # AUTOVACUUM PARAMETERS + #------------------------------------------------------------------------------ + + log_autovacuum_min_duration = 0 + autovacuum_naptime = 5min + autovacuum_vacuum_threshold = 500 + autovacuum_analyze_threshold = 500 + autovacuum_vacuum_scale_factor = 0.05 + + #------------------------------------------------------------------------------ + # LOCK MANAGEMENT + #------------------------------------------------------------------------------ + + deadlock_timeout = 5s + + #------------------------------------------------------------------------------ + # VERSION/PLATFORM COMPATIBILITY + #------------------------------------------------------------------------------ + + escape_string_warning = off + standard_conforming_strings = off +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-configs" + data: + application.conf: | + # Timeout: The number of seconds before receives and sends time out. + Timeout 120 + + RewriteEngine On + Options SymLinksIfOwnerMatch + + <VirtualHost *:80> + KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + + ProxyPreserveHost on + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must be served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] + ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs.
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common + </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + 
AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN +- apiVersion: v1 + kind: ConfigMap + metadata: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + data: + auth-type: internal + auth-kerberos-realms: undefined + auth-configuration.conf: | + # External Authentication Configuration File + # + # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication +- apiVersion: v1 kind: Service metadata: annotations: - description: "Exposes and load balances CloudForms pods" + description: Exposes and load balances CloudForms pods service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' - name: ${NAME} + name: "${NAME}" spec: clusterIP: None ports: @@ -23,141 +314,97 @@ objects: port: 80 protocol: TCP targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 selector: - name: ${NAME} + name: "${NAME}" - apiVersion: v1 kind: Route metadata: - name: ${NAME} + name: "${HTTPD_SERVICE_NAME}" spec: - host: ${APPLICATION_DOMAIN} + host: "${APPLICATION_DOMAIN}" port: - targetPort: https + targetPort: http tls: - termination: passthrough + termination: edge + insecureEdgeTerminationPolicy: Redirect to: kind: Service - name: ${NAME} -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-app - annotations: - description: "Keeps track of changes in the CloudForms app image" - spec: - dockerImageRepository: "${APPLICATION_IMG_NAME}" -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-postgresql - annotations: - description: "Keeps track of changes in the CloudForms postgresql image" - spec: - dockerImageRepository: "${POSTGRESQL_IMG_NAME}" -- apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-memcached - annotations: - description: "Keeps track of changes in the CloudForms memcached image" - spec: - dockerImageRepository: "${MEMCACHED_IMG_NAME}" + name: "${HTTPD_SERVICE_NAME}" - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: "${NAME}-${DATABASE_SERVICE_NAME}" spec: 
accessModes: - - ReadWriteOnce + - ReadWriteOnce resources: requests: - storage: ${DATABASE_VOLUME_CAPACITY} -- apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: "${NAME}-region" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${APPLICATION_REGION_VOLUME_CAPACITY} + storage: "${DATABASE_VOLUME_CAPACITY}" - apiVersion: apps/v1beta1 - kind: "StatefulSet" + kind: StatefulSet metadata: - name: ${NAME} + name: "${NAME}" annotations: - description: "Defines how to deploy the CloudForms appliance" + description: Defines how to deploy the CloudForms appliance spec: serviceName: "${NAME}" - replicas: 1 + replicas: "${APPLICATION_REPLICA_COUNT}" template: metadata: labels: - name: ${NAME} - name: ${NAME} + name: "${NAME}" + name: "${NAME}" spec: containers: - name: cloudforms - image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" + image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" livenessProbe: - tcpSocket: - port: 443 + exec: + command: + - pidof + - MIQ Server initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: - httpGet: - path: / - port: 443 - scheme: HTTPS + tcpSocket: + port: 80 initialDelaySeconds: 200 timeoutSeconds: 3 ports: - containerPort: 80 protocol: TCP - - containerPort: 443 - protocol: TCP - securityContext: - privileged: true volumeMounts: - - - name: "${NAME}-server" - mountPath: "/persistent" - - - name: "${NAME}-region" - mountPath: "/persistent-region" + - name: "${NAME}-server" + mountPath: "/persistent" env: - - - name: "APPLICATION_INIT_DELAY" - value: "${APPLICATION_INIT_DELAY}" - - - name: "DATABASE_SERVICE_NAME" - value: "${DATABASE_SERVICE_NAME}" - - - name: "DATABASE_REGION" - value: "${DATABASE_REGION}" - - - name: "MEMCACHED_SERVICE_NAME" - value: "${MEMCACHED_SERVICE_NAME}" - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - value: "${DATABASE_PASSWORD}" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_REGION + value: "${DATABASE_REGION}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password resources: requests: memory: "${APPLICATION_MEM_REQ}" @@ -168,59 +415,128 @@ objects: preStop: exec: command: - - /opt/rh/cfme-container-scripts/sync-pv-data - volumes: - - - name: "${NAME}-region" - persistentVolumeClaim: - claimName: ${NAME}-region + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 volumeClaimTemplates: - - metadata: - name: "${NAME}-server" - annotations: - # Uncomment this if using dynamic volume provisioning. 
- # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html - # volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ ReadWriteOnce ] + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Headless service for CloudForms backend pods + name: "${NAME}-backend" + spec: + clusterIP: None + selector: + name: "${NAME}-backend" +- apiVersion: apps/v1beta1 + kind: StatefulSet + metadata: + name: "${NAME}-backend" + annotations: + description: Defines how to deploy the CloudForms appliance + spec: + serviceName: "${NAME}-backend" + replicas: 0 + template: + metadata: + labels: + name: "${NAME}-backend" + name: "${NAME}-backend" + spec: + containers: + - name: cloudforms + image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}" + livenessProbe: + exec: + command: + - pidof + - MIQ Server + initialDelaySeconds: 480 + timeoutSeconds: 3 + volumeMounts: + - name: "${NAME}-server" + mountPath: "/persistent" + env: + - name: APPLICATION_INIT_DELAY + value: "${APPLICATION_INIT_DELAY}" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: database-url + - name: MIQ_SERVER_DEFAULT_ROLES + value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate + - name: FRONTEND_SERVICE_NAME + value: "${NAME}" + - name: V2_KEY + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: v2-key + - name: ANSIBLE_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password resources: requests: - storage: "${APPLICATION_VOLUME_CAPACITY}" + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - "/opt/rh/cfme-container-scripts/sync-pv-data" + serviceAccount: cfme-orchestrator + serviceAccountName: cfme-orchestrator + terminationGracePeriodSeconds: 90 + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" - apiVersion: v1 - kind: "Service" + kind: Service metadata: name: "${MEMCACHED_SERVICE_NAME}" annotations: - description: "Exposes the memcached server" + description: Exposes the memcached server spec: ports: - - - name: "memcached" - port: 11211 - targetPort: 11211 + - name: memcached + port: 11211 + targetPort: 11211 selector: name: "${MEMCACHED_SERVICE_NAME}" - apiVersion: v1 - kind: "DeploymentConfig" + kind: DeploymentConfig metadata: name: "${MEMCACHED_SERVICE_NAME}" annotations: - description: "Defines how to deploy memcached" + description: Defines how to deploy memcached spec: strategy: - type: "Recreate" + type: Recreate triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "memcached" - from: - kind: "ImageStreamTag" - name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}" - - - type: "ConfigChange" + - type: ConfigChange replicas: 1 selector: name: "${MEMCACHED_SERVICE_NAME}" @@ -232,74 +548,58 @@ objects: spec: volumes: [] containers: - - - name: "memcached" - image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" - ports: - - - containerPort: 11211 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 5 - tcpSocket: - port: 11211 - livenessProbe: - 
timeoutSeconds: 1 - initialDelaySeconds: 30 - tcpSocket: - port: 11211 - volumeMounts: [] - env: - - - name: "MEMCACHED_MAX_MEMORY" - value: "${MEMCACHED_MAX_MEMORY}" - - - name: "MEMCACHED_MAX_CONNECTIONS" - value: "${MEMCACHED_MAX_CONNECTIONS}" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - value: "${MEMCACHED_SLAB_PAGE_SIZE}" - resources: - requests: - memory: "${MEMCACHED_MEM_REQ}" - cpu: "${MEMCACHED_CPU_REQ}" - limits: - memory: "${MEMCACHED_MEM_LIMIT}" + - name: memcached + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - name: MEMCACHED_MAX_MEMORY + value: "${MEMCACHED_MAX_MEMORY}" + - name: MEMCACHED_MAX_CONNECTIONS + value: "${MEMCACHED_MAX_CONNECTIONS}" + - name: MEMCACHED_SLAB_PAGE_SIZE + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" - apiVersion: v1 - kind: "Service" + kind: Service metadata: name: "${DATABASE_SERVICE_NAME}" annotations: - description: "Exposes the database server" + description: Exposes the database server spec: ports: - - - name: "postgresql" - port: 5432 - targetPort: 5432 + - name: postgresql + port: 5432 + targetPort: 5432 selector: name: "${DATABASE_SERVICE_NAME}" - apiVersion: v1 - kind: "DeploymentConfig" + kind: DeploymentConfig metadata: name: "${DATABASE_SERVICE_NAME}" annotations: - description: "Defines how to deploy the database" + description: Defines how to deploy the database spec: strategy: - type: "Recreate" + type: Recreate triggers: - - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "postgresql" - from: - kind: "ImageStreamTag" - name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}" - - - type: "ConfigChange" + - type: ConfigChange replicas: 1 selector: name: "${DATABASE_SERVICE_NAME}" @@ -310,236 +610,524 @@ objects: name: "${DATABASE_SERVICE_NAME}" spec: volumes: - - - name: "cfme-pgdb-volume" - persistentVolumeClaim: - claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pgdb-volume + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + - name: cfme-pg-configs + configMap: + name: "${DATABASE_SERVICE_NAME}-configs" containers: - - - name: "postgresql" - image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" - ports: - - - containerPort: 5432 - readinessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 15 + - name: postgresql + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1' + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - name: cfme-pgdb-volume + mountPath: "/var/lib/pgsql/data" + - name: cfme-pg-configs + mountPath: "${POSTGRESQL_CONFIG_DIR}" + env: + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${DATABASE_NAME}" + - name: POSTGRESQL_MAX_CONNECTIONS + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - name: POSTGRESQL_SHARED_BUFFERS + value: "${POSTGRESQL_SHARED_BUFFERS}" + - 
name: POSTGRESQL_CONFIG_DIR + value: "${POSTGRESQL_CONFIG_DIR}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances Ansible pods + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: "${ANSIBLE_SERVICE_NAME}" + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: "${ANSIBLE_SERVICE_NAME}" +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${ANSIBLE_SERVICE_NAME}" + annotations: + description: Defines how to deploy the Ansible appliance + spec: + strategy: + type: Recreate + serviceName: "${ANSIBLE_SERVICE_NAME}" + replicas: 0 + template: + metadata: + labels: + name: "${ANSIBLE_SERVICE_NAME}" + name: "${ANSIBLE_SERVICE_NAME}" + spec: + containers: + - name: ansible + image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: "/" + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + env: + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: admin-password + - name: RABBITMQ_USER_NAME + value: "${ANSIBLE_RABBITMQ_USER_NAME}" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: rabbit-password + - name: ANSIBLE_SECRET_KEY + valueFrom: + secretKeyRef: + name: "${ANSIBLE_SERVICE_NAME}-secrets" + key: secret-key + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: POSTGRESQL_USER + value: "${DATABASE_USER}" + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: pg-password + - name: POSTGRESQL_DATABASE + value: "${ANSIBLE_DATABASE_NAME}" + resources: + requests: + memory: "${ANSIBLE_MEM_REQ}" + cpu: "${ANSIBLE_CPU_REQ}" + limits: + memory: "${ANSIBLE_MEM_LIMIT}" + serviceAccount: cfme-privileged + serviceAccountName: cfme-privileged +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Exposes the httpd server + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + name: httpd +- apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 + kind: DeploymentConfig + metadata: + name: "${HTTPD_SERVICE_NAME}" + annotations: + description: Defines how to deploy httpd + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1200 + triggers: + - type: ConfigChange + replicas: 1 + selector: + name: "${HTTPD_SERVICE_NAME}" + template: + metadata: + name: "${HTTPD_SERVICE_NAME}" + labels: + name: "${HTTPD_SERVICE_NAME}" + spec: + volumes: + - name: httpd-config + configMap: + name: "${HTTPD_SERVICE_NAME}-configs" + - name: httpd-auth-config + 
configMap: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + containers: + - name: httpd + image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP + livenessProbe: + exec: + command: + - pidof + - httpd + initialDelaySeconds: 15 + timeoutSeconds: 3 + readinessProbe: + tcpSocket: + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: httpd-config + mountPath: "${HTTPD_CONFIG_DIR}" + - name: httpd-auth-config + mountPath: "${HTTPD_AUTH_CONFIG_DIR}" + resources: + requests: + memory: "${HTTPD_MEM_REQ}" + cpu: "${HTTPD_CPU_REQ}" + limits: + memory: "${HTTPD_MEM_LIMIT}" + env: + - name: HTTPD_AUTH_TYPE + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms + lifecycle: + postStart: exec: command: - - "/bin/sh" - - "-i" - - "-c" - - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" - livenessProbe: - timeoutSeconds: 1 - initialDelaySeconds: 60 - tcpSocket: - port: 5432 - volumeMounts: - - - name: "cfme-pgdb-volume" - mountPath: "/var/lib/pgsql/data" - env: - - - name: "POSTGRESQL_USER" - value: "${DATABASE_USER}" - - - name: "POSTGRESQL_PASSWORD" - value: "${DATABASE_PASSWORD}" - - - name: "POSTGRESQL_DATABASE" - value: "${DATABASE_NAME}" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - value: "${POSTGRESQL_MAX_CONNECTIONS}" - - - name: "POSTGRESQL_SHARED_BUFFERS" - value: "${POSTGRESQL_SHARED_BUFFERS}" - resources: - requests: - memory: "${POSTGRESQL_MEM_REQ}" - cpu: "${POSTGRESQL_CPU_REQ}" - limits: - memory: "${POSTGRESQL_MEM_LIMIT}" - + - "/usr/bin/save-container-environment" + serviceAccount: cfme-httpd + serviceAccountName: cfme-httpd parameters: - - - name: "NAME" - displayName: Name - required: true - description: "The name assigned to all of the frontend objects defined in this template." - value: cloudforms - - - name: "DATABASE_SERVICE_NAME" - displayName: "PostgreSQL Service Name" - required: true - description: "The name of the OpenShift Service exposed for the PostgreSQL container." - value: "postgresql" - - - name: "DATABASE_USER" - displayName: "PostgreSQL User" - required: true - description: "PostgreSQL user that will access the database." - value: "root" - - - name: "DATABASE_PASSWORD" - displayName: "PostgreSQL Password" - required: true - description: "Password for the PostgreSQL user." - value: "smartvm" - - - name: "DATABASE_NAME" - required: true - displayName: "PostgreSQL Database Name" - description: "Name of the PostgreSQL database accessed." - value: "vmdb_production" - - - name: "DATABASE_REGION" - required: true - displayName: "Application Database Region" - description: "Database region that will be used for application." - value: "0" - - - name: "MEMCACHED_SERVICE_NAME" - required: true - displayName: "Memcached Service Name" - description: "The name of the OpenShift Service exposed for the Memcached container." - value: "memcached" - - - name: "MEMCACHED_MAX_MEMORY" - displayName: "Memcached Max Memory" - description: "Memcached maximum memory for memcached object storage in MB." - value: "64" - - - name: "MEMCACHED_MAX_CONNECTIONS" - displayName: "Memcached Max Connections" - description: "Memcached maximum number of connections allowed." 
- value: "1024" - - - name: "MEMCACHED_SLAB_PAGE_SIZE" - displayName: "Memcached Slab Page Size" - description: "Memcached size of each slab page." - value: "1m" - - - name: "POSTGRESQL_MAX_CONNECTIONS" - displayName: "PostgreSQL Max Connections" - description: "PostgreSQL maximum number of database connections allowed." - value: "100" - - - name: "POSTGRESQL_SHARED_BUFFERS" - displayName: "PostgreSQL Shared Buffer Amount" - description: "Amount of memory dedicated for PostgreSQL shared memory buffers." - value: "256MB" - - - name: "APPLICATION_CPU_REQ" - displayName: "Application Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." - value: "1000m" - - - name: "POSTGRESQL_CPU_REQ" - displayName: "PostgreSQL Min CPU Requested" - required: true - description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." - value: "500m" - - - name: "MEMCACHED_CPU_REQ" - displayName: "Memcached Min CPU Requested" - required: true - description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." - value: "200m" - - - name: "APPLICATION_MEM_REQ" - displayName: "Application Min RAM Requested" - required: true - description: "Minimum amount of memory the Application container will need." - value: "6144Mi" - - - name: "POSTGRESQL_MEM_REQ" - displayName: "PostgreSQL Min RAM Requested" - required: true - description: "Minimum amount of memory the PostgreSQL container will need." - value: "1024Mi" - - - name: "MEMCACHED_MEM_REQ" - displayName: "Memcached Min RAM Requested" - required: true - description: "Minimum amount of memory the Memcached container will need." - value: "64Mi" - - - name: "APPLICATION_MEM_LIMIT" - displayName: "Application Max RAM Limit" - required: true - description: "Maximum amount of memory the Application container can consume." - value: "16384Mi" - - - name: "POSTGRESQL_MEM_LIMIT" - displayName: "PostgreSQL Max RAM Limit" - required: true - description: "Maximum amount of memory the PostgreSQL container can consume." - value: "8192Mi" - - - name: "MEMCACHED_MEM_LIMIT" - displayName: "Memcached Max RAM Limit" - required: true - description: "Maximum amount of memory the Memcached container can consume." - value: "256Mi" - - - name: "POSTGRESQL_IMG_NAME" - displayName: "PostgreSQL Image Name" - description: "This is the PostgreSQL image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql" - - - name: "POSTGRESQL_IMG_TAG" - displayName: "PostgreSQL Image Tag" - description: "This is the PostgreSQL image tag/version requested to deploy." - value: "latest" - - - name: "MEMCACHED_IMG_NAME" - displayName: "Memcached Image Name" - description: "This is the Memcached image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached" - - - name: "MEMCACHED_IMG_TAG" - displayName: "Memcached Image Tag" - description: "This is the Memcached image tag/version requested to deploy." - value: "latest" - - - name: "APPLICATION_IMG_NAME" - displayName: "Application Image Name" - description: "This is the Application image name requested to deploy." - value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app" - - - name: "APPLICATION_IMG_TAG" - displayName: "Application Image Tag" - description: "This is the Application image tag/version requested to deploy." 
- value: "latest" - - - name: "APPLICATION_DOMAIN" - displayName: "Application Hostname" - description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted." - value: "" - - - name: "APPLICATION_INIT_DELAY" - displayName: "Application Init Delay" - required: true - description: "Delay in seconds before we attempt to initialize the application." - value: "15" - - - name: "APPLICATION_VOLUME_CAPACITY" - displayName: "Application Volume Capacity" - required: true - description: "Volume space available for application data." - value: "5Gi" - - - name: "APPLICATION_REGION_VOLUME_CAPACITY" - displayName: "Application Region Volume Capacity" - required: true - description: "Volume space available for region application data." - value: "5Gi" - - - name: "DATABASE_VOLUME_CAPACITY" - displayName: "Database Volume Capacity" - required: true - description: "Volume space available for database." - value: "15Gi" +- name: NAME + displayName: Name + required: true + description: The name assigned to all of the frontend objects defined in this template. + value: cloudforms +- name: V2_KEY + displayName: CloudForms Encryption Key + required: true + description: Encryption Key for CloudForms Passwords + from: "[a-zA-Z0-9]{43}" + generate: expression +- name: DATABASE_SERVICE_NAME + displayName: PostgreSQL Service Name + required: true + description: The name of the OpenShift Service exposed for the PostgreSQL container. + value: postgresql +- name: DATABASE_USER + displayName: PostgreSQL User + required: true + description: PostgreSQL user that will access the database. + value: root +- name: DATABASE_PASSWORD + displayName: PostgreSQL Password + required: true + description: Password for the PostgreSQL user. + from: "[a-zA-Z0-9]{8}" + generate: expression +- name: DATABASE_NAME + required: true + displayName: PostgreSQL Database Name + description: Name of the PostgreSQL database accessed. + value: vmdb_production +- name: DATABASE_REGION + required: true + displayName: Application Database Region + description: Database region that will be used for application. + value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm +- name: ANSIBLE_DATABASE_NAME + displayName: Ansible PostgreSQL database name + required: true + description: The database to be used by the Ansible container + value: awx +- name: MEMCACHED_SERVICE_NAME + required: true + displayName: Memcached Service Name + description: The name of the OpenShift Service exposed for the Memcached container. + value: memcached +- name: MEMCACHED_MAX_MEMORY + displayName: Memcached Max Memory + description: Memcached maximum memory for memcached object storage in MB. + value: '64' +- name: MEMCACHED_MAX_CONNECTIONS + displayName: Memcached Max Connections + description: Memcached maximum number of connections allowed. + value: '1024' +- name: MEMCACHED_SLAB_PAGE_SIZE + displayName: Memcached Slab Page Size + description: Memcached size of each slab page. + value: 1m +- name: POSTGRESQL_CONFIG_DIR + displayName: PostgreSQL Configuration Overrides + description: Directory used to store PostgreSQL configuration overrides. + value: "/var/lib/pgsql/conf.d" +- name: POSTGRESQL_MAX_CONNECTIONS + displayName: PostgreSQL Max Connections + description: PostgreSQL maximum number of database connections allowed.
+ value: '1000' +- name: POSTGRESQL_SHARED_BUFFERS + displayName: PostgreSQL Shared Buffer Amount + description: Amount of memory dedicated for PostgreSQL shared memory buffers. + value: 1GB +- name: ANSIBLE_SERVICE_NAME + displayName: Ansible Service Name + description: The name of the OpenShift Service exposed for the Ansible container. + value: ansible +- name: ANSIBLE_ADMIN_PASSWORD + displayName: Ansible admin User password + required: true + description: The password for the Ansible container admin user + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: ANSIBLE_SECRET_KEY + displayName: Ansible Secret Key + required: true + description: Encryption key for the Ansible container + from: "[a-f0-9]{32}" + generate: expression +- name: ANSIBLE_RABBITMQ_USER_NAME + displayName: RabbitMQ Username + required: true + description: Username for the Ansible RabbitMQ Server + value: ansible +- name: ANSIBLE_RABBITMQ_PASSWORD + displayName: RabbitMQ Server Password + required: true + description: Password for the Ansible RabbitMQ Server + from: "[a-zA-Z0-9]{32}" + generate: expression +- name: APPLICATION_CPU_REQ + displayName: Application Min CPU Requested + required: true + description: Minimum amount of CPU time the Application container will need (expressed in millicores). + value: 1000m +- name: POSTGRESQL_CPU_REQ + displayName: PostgreSQL Min CPU Requested + required: true + description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores). + value: 500m +- name: MEMCACHED_CPU_REQ + displayName: Memcached Min CPU Requested + required: true + description: Minimum amount of CPU time the Memcached container will need (expressed in millicores). + value: 200m +- name: ANSIBLE_CPU_REQ + displayName: Ansible Min CPU Requested + required: true + description: Minimum amount of CPU time the Ansible container will need (expressed in millicores). + value: 1000m +- name: APPLICATION_MEM_REQ + displayName: Application Min RAM Requested + required: true + description: Minimum amount of memory the Application container will need. + value: 6144Mi +- name: POSTGRESQL_MEM_REQ + displayName: PostgreSQL Min RAM Requested + required: true + description: Minimum amount of memory the PostgreSQL container will need. + value: 4Gi +- name: MEMCACHED_MEM_REQ + displayName: Memcached Min RAM Requested + required: true + description: Minimum amount of memory the Memcached container will need. + value: 64Mi +- name: ANSIBLE_MEM_REQ + displayName: Ansible Min RAM Requested + required: true + description: Minimum amount of memory the Ansible container will need. + value: 2048Mi +- name: APPLICATION_MEM_LIMIT + displayName: Application Max RAM Limit + required: true + description: Maximum amount of memory the Application container can consume. + value: 16384Mi +- name: POSTGRESQL_MEM_LIMIT + displayName: PostgreSQL Max RAM Limit + required: true + description: Maximum amount of memory the PostgreSQL container can consume. + value: 8Gi +- name: MEMCACHED_MEM_LIMIT + displayName: Memcached Max RAM Limit + required: true + description: Maximum amount of memory the Memcached container can consume. + value: 256Mi +- name: ANSIBLE_MEM_LIMIT + displayName: Ansible Max RAM Limit + required: true + description: Maximum amount of memory the Ansible container can consume. + value: 8096Mi +- name: POSTGRESQL_IMG_NAME + displayName: PostgreSQL Image Name + description: This is the PostgreSQL image name requested to deploy. 
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql +- name: POSTGRESQL_IMG_TAG + displayName: PostgreSQL Image Tag + description: This is the PostgreSQL image tag/version requested to deploy. + value: latest +- name: MEMCACHED_IMG_NAME + displayName: Memcached Image Name + description: This is the Memcached image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached +- name: MEMCACHED_IMG_TAG + displayName: Memcached Image Tag + description: This is the Memcached image tag/version requested to deploy. + value: latest +- name: FRONTEND_APPLICATION_IMG_NAME + displayName: Frontend Application Image Name + description: This is the Frontend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui +- name: BACKEND_APPLICATION_IMG_NAME + displayName: Backend Application Image Name + description: This is the Backend Application image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app +- name: FRONTEND_APPLICATION_IMG_TAG + displayName: Front end Application Image Tag + description: This is the CloudForms Frontend Application image tag/version requested to deploy. + value: latest +- name: BACKEND_APPLICATION_IMG_TAG + displayName: Back end Application Image Tag + description: This is the CloudForms Backend Application image tag/version requested to deploy. + value: latest +- name: ANSIBLE_IMG_NAME + displayName: Ansible Image Name + description: This is the Ansible image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible +- name: ANSIBLE_IMG_TAG + displayName: Ansible Image Tag + description: This is the Ansible image tag/version requested to deploy. + value: latest +- name: APPLICATION_DOMAIN + displayName: Application Hostname + description: The exposed hostname that will route to the application service, if left blank a value will be defaulted. + value: '' +- name: APPLICATION_REPLICA_COUNT + displayName: Application Replica Count + description: This is the number of Application replicas requested to deploy. + value: '1' +- name: APPLICATION_INIT_DELAY + displayName: Application Init Delay + required: true + description: Delay in seconds before we attempt to initialize the application. + value: '15' +- name: APPLICATION_VOLUME_CAPACITY + displayName: Application Volume Capacity + required: true + description: Volume space available for application data. + value: 5Gi +- name: DATABASE_VOLUME_CAPACITY + displayName: Database Volume Capacity + required: true + description: Volume space available for database. + value: 15Gi +- name: HTTPD_SERVICE_NAME + required: true + displayName: Apache httpd Service Name + description: The name of the OpenShift Service exposed for the httpd container. + value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api +- name: HTTPD_IMG_NAME + displayName: Apache httpd Image Name + description: This is the httpd image name requested to deploy. + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd +- name: HTTPD_IMG_TAG + displayName: Apache httpd Image Tag + description: This is the httpd image tag/version requested to deploy. 
+ value: latest +- name: HTTPD_CONFIG_DIR + displayName: Apache Configuration Directory + description: Directory used to store the Apache configuration files. + value: "/etc/httpd/conf.d" +- name: HTTPD_AUTH_CONFIG_DIR + displayName: External Authentication Configuration Directory + description: Directory used to store the external authentication configuration files. + value: "/etc/httpd/auth-conf.d" +- name: HTTPD_CPU_REQ + displayName: Apache httpd Min CPU Requested + required: true + description: Minimum amount of CPU time the httpd container will need (expressed in millicores). + value: 500m +- name: HTTPD_MEM_REQ + displayName: Apache httpd Min RAM Requested + required: true + description: Minimum amount of memory the httpd container will need. + value: 512Mi +- name: HTTPD_MEM_LIMIT + displayName: Apache httpd Max RAM Limit + required: true + description: Maximum amount of memory the httpd container can consume. + value: 8192Mi diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-ephemeral-template.json index 5e7585eeb..1772dbbcf 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-ephemeral-template.json @@ -5,16 +5,16 @@ "name": "mariadb-ephemeral", "annotations": { "openshift.io/display-name": "MariaDB (Ephemeral)", - "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mariadb", "tags": "database,mariadb", "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", "openshift.io/provider-display-name": "Red Hat, Inc.", - "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md", + "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md", "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.", "labels": { "template": "mariadb-persistent-template" }, @@ -82,7 +82,7 @@ ], "from": { "kind": "ImageStreamTag", - "name": "mariadb:10.1", + "name": "mariadb:${MARIADB_VERSION}", "namespace": "${NAMESPACE}" } } @@ -242,6 +242,13 @@ "description": "Name of the MariaDB database accessed.", "value": "sampledb", "required": true + }, + { + "name": "MARIADB_VERSION", + "displayName": "Version of MariaDB Image", + "description": "Version of MariaDB image to be used (10.0, 10.1, 10.2 or latest).", + "value": "10.2", + "required": true } ] } diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json index 92be8f42e..8424ecbc8 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json @@ -5,16 +5,16 @@ "name": "mariadb-persistent", "annotations": { "openshift.io/display-name": "MariaDB", - "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mariadb", "tags": "database,mariadb", "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. 
The database name, username, and password are chosen via parameters when provisioning this service.", "openshift.io/provider-display-name": "Red Hat, Inc.", - "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md", + "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md", "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.", "labels": { "template": "mariadb-persistent-template" }, @@ -99,7 +99,7 @@ ], "from": { "kind": "ImageStreamTag", - "name": "mariadb:10.1", + "name": "mariadb:${MARIADB_VERSION}", "namespace": "${NAMESPACE}" } } @@ -261,6 +261,13 @@ "required": true }, { + "name": "MARIADB_VERSION", + "displayName": "Version of MariaDB Image", + "description": "Version of MariaDB image to be used (10.0, 10.1, 10.2 or latest).", + "value": "10.2", + "required": true + }, + { "name": "VOLUME_CAPACITY", "displayName": "Volume Capacity", "description": "Volume space available for data, e.g. 512Mi, 2Gi.", diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-ephemeral-template.json index c0946416d..bed940a37 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-ephemeral-template.json @@ -5,7 +5,7 @@ "name": "mysql-ephemeral", "annotations": { "openshift.io/display-name": "MySQL (Ephemeral)", - "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mysql-database", "tags": "database,mysql", "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", @@ -14,7 +14,7 @@ "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.", "labels": { "template": "mysql-ephemeral-template" }, diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json index 6ac80f3a0..85adde65d 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json @@ -5,7 +5,7 @@ "name": "mysql-persistent", "annotations": { "openshift.io/display-name": "MySQL", - "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mysql-database", "tags": "database,mysql", "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. 
The database name, username, and password are chosen via parameters when provisioning this service.", @@ -14,7 +14,7 @@ "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.", "labels": { "template": "mysql-persistent-template" }, diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-ephemeral-template.json index 7c419f1ae..f29698d0c 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-ephemeral-template.json @@ -5,7 +5,7 @@ "name": "postgresql-ephemeral", "annotations": { "openshift.io/display-name": "PostgreSQL (Ephemeral)", - "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-postgresql", "tags": "database,postgresql", "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", @@ -14,7 +14,7 @@ "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.", "labels": { "template": "postgresql-ephemeral-template" }, @@ -127,11 +127,11 @@ } }, "livenessProbe": { + "exec": { + "command": [ "/bin/sh", "-i", "-c", "pg_isready -h 127.0.0.1 -p 5432" ] + }, "timeoutSeconds": 1, - "initialDelaySeconds": 30, - "tcpSocket": { - "port": 5432 - } + "initialDelaySeconds": 30 }, "env": [ { @@ -245,8 +245,8 @@ { "name": "POSTGRESQL_VERSION", "displayName": "Version of PostgreSQL Image", - "description": "Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest).", - "value": "9.5", + "description": "Version of PostgreSQL image to be used (9.4, 9.5, 9.6 or latest).", + "value": "9.6", "required": true } ] diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json index 190509112..7feeb704a 100644 --- a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json @@ -5,7 +5,7 @@ "name": "postgresql-persistent", "annotations": { "openshift.io/display-name": "PostgreSQL", - "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-postgresql", "tags": "database,postgresql", "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. 
The database name, username, and password are chosen via parameters when provisioning this service.", @@ -14,7 +14,7 @@ "openshift.io/support-url": "https://access.redhat.com" } }, - "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.", + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.", "labels": { "template": "postgresql-persistent-template" }, @@ -144,11 +144,11 @@ } }, "livenessProbe": { + "exec": { + "command": [ "/bin/sh", "-i", "-c", "pg_isready -h 127.0.0.1 -p 5432" ] + }, "timeoutSeconds": 1, - "initialDelaySeconds": 30, - "tcpSocket": { - "port": 5432 - } + "initialDelaySeconds": 30 }, "env": [ { @@ -269,8 +269,8 @@ { "name": "POSTGRESQL_VERSION", "displayName": "Version of PostgreSQL Image", - "description": "Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest).", - "value": "9.5", + "description": "Version of PostgreSQL image to be used (9.4, 9.5, 9.6 or latest).", + "value": "9.6", "required": true } ] diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json index ad17b709e..924c2884b 100644 --- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json @@ -44,7 +44,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/httpd-24-centos7:latest" + "name": "docker.io/centos/httpd-24-centos7:latest" } } ] @@ -91,7 +91,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/ruby-20-centos7:latest" + "name": "docker.io/openshift/ruby-20-centos7:latest" } }, { @@ -108,7 +108,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/ruby-22-centos7:latest" + "name": "docker.io/centos/ruby-22-centos7:latest" } }, { @@ -125,7 +125,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/ruby-23-centos7:latest" + "name": "docker.io/centos/ruby-23-centos7:latest" } }, { @@ -142,7 +142,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/ruby-24-centos7:latest" + "name": "docker.io/centos/ruby-24-centos7:latest" } } ] @@ -164,7 +164,7 @@ "annotations": { "openshift.io/display-name": "Node.js (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", + "description": "Build and run Node.js applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", "supports":"nodejs", @@ -172,7 +172,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "6" + "name": "8" } }, { @@ -189,7 +189,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/nodejs-010-centos7:latest" + "name": "docker.io/openshift/nodejs-010-centos7:latest" } }, { @@ -206,7 +206,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/nodejs-4-centos7:latest" + "name": "docker.io/centos/nodejs-4-centos7:latest" } }, { @@ -223,7 +223,23 @@ }, "from": { "kind": "DockerImage", - "name": "centos/nodejs-6-centos7:latest" + "name": "docker.io/centos/nodejs-6-centos7:latest" + } + }, + { + "name": "8", + "annotations": { + "openshift.io/display-name": "Node.js 8", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 8 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "8", + "sampleRepo": "https://github.com/openshift/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/nodejs-8-centos7:latest" } } ] @@ -270,7 +286,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/perl-516-centos7:latest" + "name": "docker.io/openshift/perl-516-centos7:latest" } }, { @@ -287,7 +303,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/perl-520-centos7:latest" + "name": "docker.io/centos/perl-520-centos7:latest" } }, { @@ -304,7 +320,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/perl-524-centos7:latest" + "name": "docker.io/centos/perl-524-centos7:latest" } } ] @@ -326,7 +342,7 @@ "annotations": { "openshift.io/display-name": "PHP (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", + "description": "Build and run PHP applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php", @@ -334,7 +350,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "7.0" + "name": "7.1" } }, { @@ -351,7 +367,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/php-55-centos7:latest" + "name": "docker.io/openshift/php-55-centos7:latest" } }, { @@ -368,7 +384,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/php-56-centos7:latest" + "name": "docker.io/centos/php-56-centos7:latest" } }, { @@ -385,7 +401,24 @@ }, "from": { "kind": "DockerImage", - "name": "centos/php-70-centos7:latest" + "name": "docker.io/centos/php-70-centos7:latest" + } + }, + { + "name": "7.1", + "annotations": { + "openshift.io/display-name": "PHP 7.1", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.1,php", + "version": "7.1", + "sampleRepo": "https://github.com/openshift/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/php-71-centos7:latest" } } ] @@ -432,7 +465,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/python-33-centos7:latest" + "name": "docker.io/openshift/python-33-centos7:latest" } }, { @@ -449,7 +482,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/python-27-centos7:latest" + "name": "docker.io/centos/python-27-centos7:latest" } }, { @@ -466,7 +499,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/python-34-centos7:latest" + "name": "docker.io/centos/python-34-centos7:latest" } }, { @@ -483,7 +516,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/python-35-centos7:latest" + "name": "docker.io/centos/python-35-centos7:latest" } }, { @@ -500,7 +533,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/python-36-centos7:latest" + "name": "docker.io/centos/python-36-centos7:latest" } } ] @@ -547,7 +580,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/wildfly-81-centos7:latest" + "name": "docker.io/openshift/wildfly-81-centos7:latest" } }, { @@ -564,7 +597,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/wildfly-90-centos7:latest" + "name": "docker.io/openshift/wildfly-90-centos7:latest" } }, { @@ -581,7 +614,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/wildfly-100-centos7:latest" + "name": "docker.io/openshift/wildfly-100-centos7:latest" } }, { @@ -598,7 +631,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/wildfly-101-centos7:latest" + "name": "docker.io/openshift/wildfly-101-centos7:latest" } } ] @@ -641,7 +674,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/mysql-55-centos7:latest" + "name": "docker.io/openshift/mysql-55-centos7:latest" } }, { @@ -656,7 +689,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/mysql-56-centos7:latest" + "name": "docker.io/centos/mysql-56-centos7:latest" } }, { @@ -671,7 +704,88 @@ }, "from": { "kind": "DockerImage", - "name": "centos/mysql-57-centos7:latest" + "name": 
"docker.io/centos/mysql-57-centos7:latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "nginx", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy (nginx)" + } + }, + "spec": { + "tags": [ + { + "name": "1.8", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.8", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP Server and a reverse proxy (nginx) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.8/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.8" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/nginx-18-centos7:latest" + } + }, + { + "name": "1.10", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.10", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP Server and a reverse proxy (nginx) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.10/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.10" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/nginx-110-centos7:latest" + } + }, + { + "name": "1.12", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.12", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP Server and a reverse proxy (nginx) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.12/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.12" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/nginx-112-centos7:latest" + } + }, + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP Server and a reverse proxy (nginx) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.12/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Nginx available on OpenShift, including major versions updates.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "1.12" } } ] @@ -693,13 +807,13 @@ "annotations": { "openshift.io/display-name": "MariaDB (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a MariaDB database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", + "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", - "tags": "mariadb" + "tags": "database,mariadb" }, "from": { "kind": "ImageStreamTag", - "name": "10.1" + "name": "10.2" } }, { @@ -709,12 +823,27 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MariaDB 10.1 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.", "iconClass": "icon-mariadb", - "tags": "mariadb", + "tags": "database,mariadb", "version": "10.1" }, "from": { "kind": "DockerImage", - "name": "centos/mariadb-101-centos7:latest" + "name": "docker.io/centos/mariadb-101-centos7:latest" + } + }, + { + "name": "10.2", + "annotations": { + "openshift.io/display-name": "MariaDB 10.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a MariaDB 10.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.2/README.md.", + "iconClass": "icon-mariadb", + "tags": "database,mariadb", + "version": "10.2" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/mariadb-102-centos7:latest" } } ] @@ -736,13 +865,13 @@ "annotations": { "openshift.io/display-name": "PostgreSQL (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", + "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", - "tags": "postgresql" + "tags": "database,postgresql" }, "from": { "kind": "ImageStreamTag", - "name": "9.5" + "name": "9.6" } }, { @@ -750,14 +879,14 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.2", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.2 database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.", + "description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2/README.md.", "iconClass": "icon-postgresql", "tags": "hidden,postgresql", "version": "9.2" }, "from": { "kind": "DockerImage", - "name": "openshift/postgresql-92-centos7:latest" + "name": "docker.io/openshift/postgresql-92-centos7:latest" } }, { @@ -765,14 +894,14 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.4", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.", + "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4/README.md.", "iconClass": "icon-postgresql", - "tags": "postgresql", + "tags": "database,postgresql", "version": "9.4" }, "from": { "kind": "DockerImage", - "name": "centos/postgresql-94-centos7:latest" + "name": "docker.io/centos/postgresql-94-centos7:latest" } }, { @@ -780,14 +909,29 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.5", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.", + "description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5/README.md.", "iconClass": "icon-postgresql", - "tags": "postgresql", + "tags": "database,postgresql", "version": "9.5" }, "from": { "kind": "DockerImage", - "name": "centos/postgresql-95-centos7:latest" + "name": "docker.io/centos/postgresql-95-centos7:latest" + } + }, + { + "name": "9.6", + "annotations": { + "openshift.io/display-name": "PostgreSQL 9.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a PostgreSQL 9.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.6/README.md.", + "iconClass": "icon-postgresql", + "tags": "database,postgresql", + "version": "9.6" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/postgresql-96-centos7:latest" } } ] @@ -809,13 +953,13 @@ "annotations": { "openshift.io/display-name": "MongoDB (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", + "description": "Provides a MongoDB database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", - "tags": "mongodb" + "tags": "database,mongodb" }, "from": { "kind": "ImageStreamTag", - "name": "3.2" + "name": "3.4" } }, { @@ -830,7 +974,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/mongodb-24-centos7:latest" + "name": "docker.io/openshift/mongodb-24-centos7:latest" } }, { @@ -840,12 +984,12 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 2.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.", "iconClass": "icon-mongodb", - "tags": "mongodb", + "tags": "database,mongodb", "version": "2.6" }, "from": { "kind": "DockerImage", - "name": "centos/mongodb-26-centos7:latest" + "name": "docker.io/centos/mongodb-26-centos7:latest" } }, { @@ -855,12 +999,27 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.", "iconClass": "icon-mongodb", - "tags": "mongodb", + "tags": "database,mongodb", "version": "3.2" }, "from": { "kind": "DockerImage", - "name": "centos/mongodb-32-centos7:latest" + "name": "docker.io/centos/mongodb-32-centos7:latest" + } + }, + { + "name": "3.4", + "annotations": { + "openshift.io/display-name": "MongoDB 3.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a MongoDB 3.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.4/README.md.", + "iconClass": "icon-mongodb", + "tags": "database,mongodb", + "version": "3.4" + }, + "from": { + "kind": "DockerImage", + "name": "docker.io/centos/mongodb-34-centos7:latest" } } ] @@ -903,7 +1062,7 @@ }, "from": { "kind": "DockerImage", - "name": "centos/redis-32-centos7:latest" + "name": "docker.io/centos/redis-32-centos7:latest" } } ] @@ -946,7 +1105,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/jenkins-1-centos7:latest" + "name": "docker.io/openshift/jenkins-1-centos7:latest" } }, { @@ -961,7 +1120,7 @@ }, "from": { "kind": "DockerImage", - "name": "openshift/jenkins-2-centos7:v3.9" + "name": "docker.io/openshift/jenkins-2-centos7:v3.9" } } ] diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json index efc8705f4..af319beed 100644 --- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json @@ -164,7 +164,7 @@ "annotations": { "openshift.io/display-name": "Node.js (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Node.js applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", + "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", "supports":"nodejs", @@ -172,7 +172,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "6" + "name": "8" } }, { @@ -225,6 +225,22 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/nodejs-6-rhel7:latest" } + }, + { + "name": "8", + "annotations": { + "openshift.io/display-name": "Node.js 8", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 8 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "8", + "sampleRepo": "https://github.com/openshift/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/nodejs-8-rhel7:latest" + } } ] } @@ -326,7 +342,7 @@ "annotations": { "openshift.io/display-name": "PHP (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", + "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php", @@ -334,7 +350,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "7.0" + "name": "7.1" } }, { @@ -387,6 +403,23 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/php-70-rhel7:latest" } + }, + { + "name": "7.1", + "annotations": { + "openshift.io/display-name": "PHP 7.1", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.1 applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.1,php", + "version": "7.1", + "sampleRepo": "https://github.com/openshift/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/php-71-rhel7:latest" + } } ] } @@ -583,6 +616,87 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { + "name": "nginx", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy (nginx)" + } + }, + "spec": { + "tags": [ + { + "name": "1.8", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.8", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP server and a reverse proxy (nginx) on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.8/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.8" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/nginx-18-rhel7:latest" + } + }, + { + "name": "1.10", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.10", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP server and a reverse proxy (nginx) on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.10/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.10" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/nginx-110-rhel7:latest" + } + }, + { + "name": "1.12", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy 1.12", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP server and a reverse proxy (nginx) on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.12/README.md.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git", + "version": "1.12" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/nginx-112-rhel7:latest" + } + }, + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and serve static content via Nginx HTTP server and a reverse proxy (nginx) on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/nginx-container/blob/master/1.12/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Nginx available on OpenShift, including major versions updates.", + "iconClass": "icon-nginx", + "tags": "builder,nginx", + "supports":"nginx", + "sampleRepo": "https://github.com/sclorg/nginx-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "1.12" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { "name": "mariadb", "annotations": { "openshift.io/display-name": "MariaDB" @@ -595,13 +709,13 @@ "annotations": { "openshift.io/display-name": "MariaDB (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", + "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", - "tags": "mariadb" + "tags": "database,mariadb" }, "from": { "kind": "ImageStreamTag", - "name": "10.1" + "name": "10.2" } }, { @@ -611,13 +725,28 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MariaDB 10.1 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.", "iconClass": "icon-mariadb", - "tags": "mariadb", + "tags": "database,mariadb", "version": "10.1" }, "from": { "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/mariadb-101-rhel7:latest" } + }, + { + "name": "10.2", + "annotations": { + "openshift.io/display-name": "MariaDB 10.2", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a MariaDB 10.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.2/README.md.", + "iconClass": "icon-mariadb", + "tags": "database,mariadb", + "version": "10.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/mariadb-102-rhel7:latest" + } } ] } @@ -638,13 +767,13 @@ "annotations": { "openshift.io/display-name": "PostgreSQL (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", + "description": "Provides a PostgreSQL database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", - "tags": "postgresql" + "tags": "database,postgresql" }, "from": { "kind": "ImageStreamTag", - "name": "9.5" + "name": "9.6" } }, { @@ -652,7 +781,7 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.2", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.", + "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2/README.md.", "iconClass": "icon-postgresql", "tags": "hidden,postgresql", "version": "9.2" @@ -667,9 +796,9 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.4", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.", + "description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4/README.md.", "iconClass": "icon-postgresql", - "tags": "postgresql", + "tags": "database,postgresql", "version": "9.4" }, "from": { @@ -682,15 +811,30 @@ "annotations": { "openshift.io/display-name": "PostgreSQL 9.5", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.", + "description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5/README.md.", "iconClass": "icon-postgresql", - "tags": "postgresql", + "tags": "database,postgresql", "version": "9.5" }, "from": { "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/postgresql-95-rhel7:latest" } + }, + { + "name": "9.6", + "annotations": { + "openshift.io/display-name": "PostgreSQL (Ephemeral) 9.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a PostgreSQL 9.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.6/README.md.", + "iconClass": "icon-postgresql", + "tags": "database,postgresql", + "version": "9.6" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/postgresql-96-rhel7:latest" + } } ] } @@ -711,13 +855,13 @@ "annotations": { "openshift.io/display-name": "MongoDB (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Provides a MongoDB database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", + "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", "tags": "mongodb" }, "from": { "kind": "ImageStreamTag", - "name": "3.2" + "name": "3.4" } }, { @@ -742,7 +886,7 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 2.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.", "iconClass": "icon-mongodb", - "tags": "mongodb", + "tags": "database,mongodb", "version": "2.6" }, "from": { @@ -757,13 +901,28 @@ "openshift.io/provider-display-name": "Red Hat, Inc.", "description": "Provides a MongoDB 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.", "iconClass": "icon-mongodb", - "tags": "mongodb", + "tags": "database,mongodb", "version": "3.2" }, "from": { "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/mongodb-32-rhel7:latest" } + }, + { + "name": "3.4", + "annotations": { + "openshift.io/display-name": "MongoDB 3.4", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Provides a MongoDB 3.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.4/README.md.", + "iconClass": "icon-mongodb", + "tags": "database,mongodb", + "version": "3.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/mongodb-34-rhel7:latest" + } } ] } diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/README.md index 6d2ccbf7f..710d5f58d 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/README.md +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/README.md @@ -18,6 +18,7 @@ instantiating them. * [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex). * [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex). * [Httpd](https://raw.githubusercontent.com/openshift/httpd-ex/master/openshift/templates/httpd.json) - Provides a basic Httpd static content application. 
For more information see the [source repository](https://github.com/openshift/httpd-ex). +* [Nginx](https://raw.githubusercontent.com/sclorg/nginx-ex/master/openshift/templates/nginx.json) - Provides a basic Nginx static content application. For more information see the [source repository](https://github.com/sclorg/nginx-ex). * [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex). * [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex). * [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex). diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json index 40b4eaa81..8888f19d0 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", "labels": { - "template": "cakephp-mysql-persistent", - "app": "cakephp-mysql-persistent" + "template": "cakephp-mysql-persistent", + "app": "cakephp-mysql-persistent" }, "objects": [ { @@ -209,6 +209,7 @@ "readinessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 3, + "periodSeconds": 60, "httpGet": { "path": "/health.php", "port": 8080 @@ -217,6 +218,7 @@ "livenessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 30, + "periodSeconds": 60, "httpGet": { "path": "/health.php", "port": 8080 diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json index ecd90e495..2bf7acd8c 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", "labels": { - "template": "cakephp-mysql-example", - "app": "cakephp-mysql-example" + "template": "cakephp-mysql-example", + "app": "cakephp-mysql-example" }, "objects": [ { @@ -209,6 +209,7 @@ "readinessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 3, + "periodSeconds": 60, "httpGet": { "path": "/health.php", "port": 8080 @@ -217,6 +218,7 @@ "livenessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 30, + 
"periodSeconds": 60, "httpGet": { "path": "/health.php", "port": 8080 diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json index 17a155600..b29f8ba40 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { - "template": "dancer-mysql-persistent", - "app": "dancer-mysql-persistent" + "template": "dancer-mysql-persistent", + "app": "dancer-mysql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json index abf711535..e76353764 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { - "template": "dancer-mysql-example", - "app": "dancer-mysql-example" + "template": "dancer-mysql-example", + "app": "dancer-mysql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json index c8dab0b53..7a0ab213a 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { - "template": "django-psql-persistent", - "app": "django-psql-persistent" + "template": "django-psql-persistent", + "app": "django-psql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json index 6395defda..be3fc740c 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { - "template": "django-psql-example", - "app": "django-psql-example" + "template": 
"django-psql-example", + "app": "django-psql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json index e944f21a5..67ae3c751 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", "labels": { - "template": "httpd-example", - "app": "httpd-example" + "template": "httpd-example", + "app": "httpd-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nginx.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nginx.json new file mode 100644 index 000000000..84aa1f469 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nginx.json @@ -0,0 +1,283 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "nginx-example", + "annotations": { + "openshift.io/display-name": "Nginx HTTP server and a reverse proxy", + "description": "An example Nginx HTTP server and a reverse proxy (nginx) application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/nginx-ex/blob/master/README.md.", + "tags": "quickstart,nginx", + "iconClass": "icon-nginx", + "openshift.io/long-description": "This template defines resources needed to develop a static application served by Nginx HTTP server and a reverse proxy (nginx), including a build configuration and application deployment configuration.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/sclorg/nginx-ex", + "openshift.io/support-url": "https://access.redhat.com" + } + }, + "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/nginx-ex/blob/master/README.md.", + "labels": { + "template": "nginx-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "${NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "${NAME}" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Defines how to build the application", + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": 
"${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "${NAMESPACE}", + "name": "nginx:${NGINX_VERSION}" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${NAME}:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "ConfigChange" + }, + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Defines how to deploy the application server", + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "nginx-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${NAME}" + }, + "template": { + "metadata": { + "name": "${NAME}", + "labels": { + "name": "${NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "nginx-example", + "image": " ", + "ports": [ + { + "containerPort": 8080 + } + ], + "readinessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 3, + "httpGet": { + "path": "/", + "port": 8080 + } + }, + "livenessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 30, + "httpGet": { + "path": "/", + "port": 8080 + } + }, + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "env": [ + ], + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + } + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "NAME", + "displayName": "Name", + "description": "The name assigned to all of the frontend objects defined in this template.", + "required": true, + "value": "nginx-example" + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "required": true, + "value": "openshift" + }, + { + "name": "NGINX_VERSION", + "displayName": "NGINX Version", + "description": "Version of NGINX image to be used (1.12 by default).", + "required": true, + "value": "1.12" + }, + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "required": true, + "value": "512Mi" + }, + { + "name": "SOURCE_REPOSITORY_URL", + "displayName": "Git Repository URL", + "description": "The URL of the repository with your application source code.", + "required": true, + "value": "https://github.com/sclorg/nginx-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "displayName": "Git Reference", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." + }, + { + "name": "CONTEXT_DIR", + "displayName": "Context Directory", + "description": "Set this to the relative path to your project if it is not in the root of your repository." 
+ }, + { + "name": "APPLICATION_DOMAIN", + "displayName": "Application Hostname", + "description": "The exposed hostname that will route to the nginx service, if left blank a value will be defaulted.", + "value": "" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "displayName": "GitHub Webhook Secret", + "description": "Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "GENERIC_WEBHOOK_SECRET", + "displayName": "Generic Webhook Secret", + "description": "A secret string used to configure the Generic webhook.", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json index f04adaa67..787f51361 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json @@ -17,8 +17,7 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "labels": { - "template": "nodejs-mongo-persistent", - "app": "nodejs-mongo-persistent" + "template": "nodejs-mongo-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json index 0ce36dba5..0fcc540ab 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "labels": { - "template": "nodejs-mongodb-example", - "app": "nodejs-mongodb-example" + "template": "nodejs-mongodb-example", + "app": "nodejs-mongodb-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json index 10e9382cc..9f40f250b 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { - "template": "rails-pgsql-persistent", - "app": "rails-pgsql-persistent" + "template": "rails-pgsql-persistent", + "app": "rails-pgsql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json 
b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json index 8ec2c8ea6..77d218aa5 100644 --- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json @@ -17,8 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { - "template": "rails-postgresql-example", - "app": "rails-postgresql-example" + "template": "rails-postgresql-example", + "app": "rails-postgresql-example" }, "objects": [ { diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml index 1a34c85fc..9f46a4683 100644 --- a/roles/openshift_examples/meta/main.yml +++ b/roles/openshift_examples/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml index 4f5277fa2..22a3fcd3b 100644 --- a/roles/openshift_excluder/tasks/verify_excluder.yml +++ b/roles/openshift_excluder/tasks/verify_excluder.yml @@ -3,7 +3,7 @@ # - excluder - name: Get available excluder version repoquery: - name: "{{ excluder }}" + name: "{{ excluder }}{{ '-' ~ r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.') ~ '*' if r_openshift_excluder_upgrade_target is defined else '' }}" ignore_excluders: true register: repoquery_out diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml index 5ae863871..b38ebdfb4 100644 --- a/roles/openshift_expand_partition/tasks/main.yml +++ b/roles/openshift_expand_partition/tasks/main.yml @@ -8,7 +8,7 @@ - name: Determine if growpart is installed command: "rpm -q cloud-utils-growpart" register: has_growpart - failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout + failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout changed_when: false when: openshift_is_containerized | bool diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index d7c358a2f..452cc4ef6 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -15,8 +15,10 @@ import os import yaml import struct import socket +import ipaddress from distutils.util import strtobool from distutils.version import LooseVersion +from ansible.module_utils.six import u from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser @@ -1146,6 +1148,8 @@ def set_proxy_facts(facts): if 'no_proxy_internal_hostnames' in common: common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(',')) # We always add local dns domain and ourselves no matter what + kube_svc_ip = str(ipaddress.ip_network(u(common['portal_net']))[1]) + common['no_proxy'].append(kube_svc_ip) common['no_proxy'].append('.' 
+ common['dns_domain']) common['no_proxy'].append('.svc') common['no_proxy'].append(common['hostname']) @@ -1430,9 +1434,6 @@ class OpenShiftFacts(object): dynamic_provisioning_enabled=True, max_requests_inflight=500) - if 'node' in roles: - defaults['node'] = dict(labels={}) - if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) @@ -1465,6 +1466,11 @@ class OpenShiftFacts(object): if metadata: metadata['project']['attributes'].pop('sshKeys', None) metadata['instance'].pop('serviceAccounts', None) + elif bios_vendor == 'Amazon EC2': + # Adds support for Amazon EC2 C5 instance types + provider = 'aws' + metadata_url = 'http://169.254.169.254/latest/meta-data/' + metadata = get_provider_metadata(metadata_url) elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version): provider = 'aws' metadata_url = 'http://169.254.169.254/latest/meta-data/' diff --git a/roles/openshift_gcp/files/bootstrap-script.sh b/roles/openshift_gcp/files/bootstrap-script.sh new file mode 100644 index 000000000..0c3f1999b --- /dev/null +++ b/roles/openshift_gcp/files/bootstrap-script.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# +# This script is a startup script for bootstrapping a GCP node +# from a config stored in the project metadata. It loops until +# it finds the script and then starts the origin-node service. +# TODO: generalize + +set -o errexit +set -o nounset +set -o pipefail + +if [[ "$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap" -H "Metadata-Flavor: Google" )" != "true" ]]; then + echo "info: Bootstrap is not enabled for this instance, skipping" 1>&2 + exit 0 +fi + +if ! id=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-id" -H "Metadata-Flavor: Google" ); then + echo "error: Unable to get cluster-id for instance from cluster metadata" 1>&2 + exit 1 +fi + +if ! node_group=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/node-group" -H "Metadata-Flavor: Google" ); then + echo "error: Unable to get node-group for instance from cluster metadata" 1>&2 + exit 1 +fi + +if ! config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then + while true; do + if config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/project/attributes/${id}-bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then + break + fi + echo "info: waiting for ${id}-bootstrap-config to become available in cluster metadata ..." 
1>&2 + sleep 5 + done +fi + +echo "Got bootstrap config from metadata" +mkdir -p /etc/origin/node +echo -n "${config}" > /etc/origin/node/bootstrap.kubeconfig +echo "BOOTSTRAP_CONFIG_NAME=node-config-${node_group}" >> /etc/sysconfig/origin-node +systemctl enable origin-node +systemctl start origin-node diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.service b/roles/openshift_gcp/files/openshift-bootstrap-update.service new file mode 100644 index 000000000..c65b1b34e --- /dev/null +++ b/roles/openshift_gcp/files/openshift-bootstrap-update.service @@ -0,0 +1,7 @@ +[Unit] +Description=Update the OpenShift node bootstrap configuration + +[Service] +Type=oneshot +ExecStart=/usr/bin/openshift-bootstrap-update +User=root diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.timer b/roles/openshift_gcp/files/openshift-bootstrap-update.timer new file mode 100644 index 000000000..1a517b33e --- /dev/null +++ b/roles/openshift_gcp/files/openshift-bootstrap-update.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Update the OpenShift node bootstrap credentials hourly + +[Timer] +OnBootSec=30s +OnCalendar=hourly +Persistent=true + +[Install] +WantedBy=timers.target
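# Illustration (not part of this patch): once the service/timer pair above is installed
# on a master, the hourly credential refresh can be checked with ordinary systemd
# tooling, for example:
#   systemctl enable --now openshift-bootstrap-update.timer
#   systemctl list-timers openshift-bootstrap-update.timer
#   journalctl -u openshift-bootstrap-update.service -n 50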
\ No newline at end of file diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp/files/partition.conf index b87e5e0b6..76e65ab9c 100644 --- a/roles/openshift_gcp_image_prep/files/partition.conf +++ b/roles/openshift_gcp/files/partition.conf @@ -1,3 +1,3 @@ [Service] ExecStartPost=-/usr/bin/growpart /dev/sda 1 -ExecStartPost=-/sbin/xfs_growfs / +ExecStartPost=-/sbin/xfs_growfs /
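# Note (editorial, not part of this patch): the partition.conf drop-in moves from the
# openshift_gcp_image_prep role into openshift_gcp; the -/+ pair above appears to differ
# only in the trailing newline (hence the "\ No newline at end of file" marker below).
# Whether the root partition and XFS filesystem actually grew on first boot can be
# checked on an instance with, for example:
#   lsblk /dev/sda
#   df -h /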
\ No newline at end of file diff --git a/roles/openshift_gcp/meta/main.yml b/roles/openshift_gcp/meta/main.yml new file mode 100644 index 000000000..5e428f8de --- /dev/null +++ b/roles/openshift_gcp/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: + author: Clayton Coleman + description: + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- role: lib_utils +- role: lib_openshift diff --git a/roles/openshift_gcp/tasks/add_custom_repositories.yml b/roles/openshift_gcp/tasks/add_custom_repositories.yml new file mode 100644 index 000000000..04718f78e --- /dev/null +++ b/roles/openshift_gcp/tasks/add_custom_repositories.yml @@ -0,0 +1,20 @@ +--- +- name: Copy custom repository secrets + copy: + src: "{{ files_dir }}/{{ item.1.sslclientcert }}" + dest: /var/lib/yum/custom_secret_{{ item.0 }}_cert + when: item.1.sslclientcert | default(false) + with_indexed_items: "{{ provision_custom_repositories }}" +- name: Copy custom repository secrets + copy: + src: "{{ files_dir }}/{{ item.1.sslclientkey }}" + dest: /var/lib/yum/custom_secret_{{ item.0 }}_key + when: item.1.sslclientkey | default(false) + with_indexed_items: "{{ provision_custom_repositories }}" + +- name: Create any custom repos that are defined + template: + src: yum_repo.j2 + dest: /etc/yum.repos.d/provision_custom_repositories.repo + when: provision_custom_repositories | length > 0 + notify: refresh cache diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml index fee5ab618..2c6e2790a 100644 --- a/roles/openshift_gcp_image_prep/tasks/main.yaml +++ b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml @@ -1,18 +1,10 @@ ---- # GCE instances are starting with xfs AND barrier=1, which is only for extfs. +--- - name: Remove barrier=1 from XFS fstab entries - lineinfile: - path: /etc/fstab - regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$' - line: '\1xfs\2 \4' - backrefs: yes + command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab - name: Ensure the root filesystem has XFS group quota turned on - lineinfile: - path: /boot/grub2/grub.cfg - regexp: '^(.*)linux16 (.*)$' - line: '\1linux16 \2 rootflags=gquota' - backrefs: yes + command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg - name: Ensure the root partition grows on startup copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/ diff --git a/roles/openshift_gcp/tasks/configure_master_bootstrap.yml b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml new file mode 100644 index 000000000..591cb593c --- /dev/null +++ b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml @@ -0,0 +1,36 @@ +# +# These tasks configure the instance to periodically update the project metadata with the +# latest bootstrap kubeconfig from the project metadata. This keeps the project metadata +# in sync with the cluster's configuration. We then invoke a CSR approve on any nodes that +# are waiting to join the cluster. 
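# (Illustration only, not part of this patch: the oc_adm_csr task at the end of this
# file is roughly the automated form of approving pending node CSRs by hand, e.g.
#   oc get csr -o name
#   oc adm certificate approve <csr-name>
# for each request still reported as Pending.)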
+# +--- +- name: Copy unit service + copy: + src: openshift-bootstrap-update.timer + dest: /etc/systemd/system/openshift-bootstrap-update.timer + owner: root + group: root + mode: 0664 + +- name: Copy unit timer + copy: + src: openshift-bootstrap-update.service + dest: /etc/systemd/system/openshift-bootstrap-update.service + owner: root + group: root + mode: 0664 + +- name: Create bootstrap update script + template: src=openshift-bootstrap-update.j2 dest=/usr/bin/openshift-bootstrap-update mode=u+rx + +- name: Start bootstrap update timer + systemd: + name: "openshift-bootstrap-update.timer" + state: started + +- name: Bootstrap all nodes that were identified with bootstrap metadata + run_once: true + oc_adm_csr: + nodes: "{{ groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list }}" + timeout: 60 diff --git a/roles/openshift_gcp/tasks/configure_master_healthcheck.yml b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml new file mode 100644 index 000000000..aa9655977 --- /dev/null +++ b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml @@ -0,0 +1,19 @@ +--- +- name: refresh yum cache + command: yum clean all + args: + warn: no + when: ansible_os_family == "RedHat" + +- name: install haproxy + package: name=haproxy state=present + register: result + until: '"failed" not in result' + retries: 10 + delay: 10 + +- name: configure haproxy + template: src=master_healthcheck.j2 dest=/etc/haproxy/haproxy.cfg + +- name: start and enable haproxy service + service: name=haproxy state=started enabled=yes diff --git a/roles/openshift_gcp/tasks/dynamic_inventory.yml b/roles/openshift_gcp/tasks/dynamic_inventory.yml new file mode 100644 index 000000000..1637da945 --- /dev/null +++ b/roles/openshift_gcp/tasks/dynamic_inventory.yml @@ -0,0 +1,5 @@ +--- +- name: Extract PEM from service account file + copy: content="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}" dest=/tmp/gce.pem mode=0600 +- name: Templatize environment script + template: src=inventory.j2.sh dest=/tmp/inventory.sh mode=u+rx diff --git a/roles/openshift_gcp/tasks/frequent_log_rotation.yml b/roles/openshift_gcp/tasks/frequent_log_rotation.yml new file mode 100644 index 000000000..0b4b27f84 --- /dev/null +++ b/roles/openshift_gcp/tasks/frequent_log_rotation.yml @@ -0,0 +1,18 @@ +--- +- name: Rotate logs daily + replace: + dest: /etc/logrotate.conf + regexp: '^weekly|monthly|yearly$' + replace: daily +- name: Rotate at a smaller size of log + lineinfile: + dest: /etc/logrotate.conf + state: present + regexp: '^size' + line: size 10M +- name: Limit total size of log files + lineinfile: + dest: /etc/logrotate.conf + state: present + regexp: '^maxsize' + line: maxsize 20M diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yml index ad205ba33..fb147bc78 100644 --- a/roles/openshift_gcp/tasks/main.yaml +++ b/roles/openshift_gcp/tasks/main.yml @@ -17,7 +17,7 @@ - name: Provision GCP DNS domain command: /tmp/openshift_gcp_provision_dns.sh args: - chdir: "{{ playbook_dir }}/files" + chdir: "{{ files_dir }}" register: dns_provision when: - state | default('present') == 'present' @@ -33,7 +33,7 @@ - name: Provision GCP resources command: /tmp/openshift_gcp_provision.sh args: - chdir: "{{ playbook_dir }}/files" + chdir: "{{ files_dir }}" when: - state | default('present') == 'present' diff --git a/roles/openshift_gcp/tasks/node_cloud_config.yml 
b/roles/openshift_gcp/tasks/node_cloud_config.yml new file mode 100644 index 000000000..4e982f497 --- /dev/null +++ b/roles/openshift_gcp/tasks/node_cloud_config.yml @@ -0,0 +1,12 @@ +--- +- name: ensure the /etc/origin folder exists + file: name=/etc/origin state=directory + +- name: configure gce cloud config options + ini_file: dest=/etc/origin/cloudprovider/gce.conf section=Global option={{ item.key }} value={{ item.value }} state=present create=yes + with_items: + - { key: 'project-id', value: '{{ openshift_gcp_project }}' } + - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } + - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } + - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } + - { key: 'multizone', value: 'false' } diff --git a/roles/openshift_gcp/tasks/publish_image.yml b/roles/openshift_gcp/tasks/publish_image.yml new file mode 100644 index 000000000..db8a7ca69 --- /dev/null +++ b/roles/openshift_gcp/tasks/publish_image.yml @@ -0,0 +1,32 @@ +--- +- name: Require openshift_gcp_image + fail: + msg: "A source image name or family is required for image publishing. Please ensure `openshift_gcp_image` is defined." + when: openshift_gcp_image is undefined + +- name: Require openshift_gcp_target_image + fail: + msg: "A target image name or family is required for image publishing. Please ensure `openshift_gcp_target_image` is defined." + when: openshift_gcp_target_image is undefined + +- block: + - name: Retrieve images in the {{ openshift_gcp_target_image }} family + command: > + gcloud --project "{{ openshift_gcp_project }}" compute images list + "--filter=family={{ openshift_gcp_target_image }}" + --format=json --sort-by ~creationTimestamp + register: images + - name: Prune oldest images + command: > + gcloud --project "{{ openshift_gcp_project }}" compute images delete "{{ item['name'] }}" + with_items: "{{ (images.stdout | default('[]') | from_json )[( openshift_gcp_keep_images | int ):] }}" + when: openshift_gcp_keep_images is defined + +- name: Copy the latest image in the family {{ openshift_gcp_image }} to {{ openshift_gcp_target_image }} + command: > + gcloud --project "{{ openshift_gcp_target_project | default(openshift_gcp_project) }}" + beta compute images create + "{{ openshift_gcp_target_image_name | default(openshift_gcp_target_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" + --family "{{ openshift_gcp_target_image }}" + --source-image-family "{{ openshift_gcp_image }}" + --source-image-project "{{ openshift_gcp_project }}" diff --git a/roles/openshift_gcp/tasks/setup_scale_group_facts.yml b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml new file mode 100644 index 000000000..0fda43123 --- /dev/null +++ b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml @@ -0,0 +1,44 @@ +--- +- name: Add masters to requisite groups + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: masters, etcd + with_items: "{{ groups['tag_ocp-master'] }}" + +- name: Add a master to the primary masters group + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: primary_master + with_items: "{{ groups['tag_ocp-master'].0 }}" + +- name: Add non-bootstrapping master node instances to node group + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: nodes + openshift_node_labels: + role: infra + with_items: "{{ groups['tag_ocp-master'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}" + +- name: Add infra node instances to node group + add_host: + name: "{{ hostvars[item].gce_name }}" + 
groups: nodes + openshift_node_labels: + role: infra + with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}" + +- name: Add node instances to node group + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: nodes + openshift_node_labels: + role: app + with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}" + +- name: Add bootstrap node instances + add_host: + name: "{{ hostvars[item].gce_name }}" + groups: bootstrap_nodes + openshift_node_bootstrap: True + with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}" + when: not (openshift_node_bootstrap | default(False)) diff --git a/roles/openshift_gcp/templates/inventory.j2.sh b/roles/openshift_gcp/templates/inventory.j2.sh new file mode 100644 index 000000000..dcaffb578 --- /dev/null +++ b/roles/openshift_gcp/templates/inventory.j2.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +export GCE_PROJECT="{{ openshift_gcp_project }}" +export GCE_ZONE="{{ openshift_gcp_zone }}" +export GCE_EMAIL="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" +export GCE_PEM_FILE_PATH="/tmp/gce.pem" +export INVENTORY_IP_TYPE="{{ inventory_ip_type }}" +export GCE_TAGGED_INSTANCES="{{ openshift_gcp_prefix }}ocp"
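# Usage sketch (an assumption, not part of this template): these GCE_* variables are
# the ones read by the classic gce.py dynamic inventory script, so the expected flow
# on the provisioning host is roughly:
#   . /tmp/inventory.sh
#   ansible-inventory -i gce.py --list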
\ No newline at end of file diff --git a/roles/openshift_gcp/templates/master_healthcheck.j2 b/roles/openshift_gcp/templates/master_healthcheck.j2 new file mode 100644 index 000000000..189e578c5 --- /dev/null +++ b/roles/openshift_gcp/templates/master_healthcheck.j2 @@ -0,0 +1,68 @@ +#--------------------------------------------------------------------- +# Example configuration for a possible web application. See the +# full configuration options online. +# +# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt +# +#--------------------------------------------------------------------- + +#--------------------------------------------------------------------- +# Global settings +#--------------------------------------------------------------------- +global + # to have these messages end up in /var/log/haproxy.log you will + # need to: + # + # 1) configure syslog to accept network log events. This is done + # by adding the '-r' option to the SYSLOGD_OPTIONS in + # /etc/sysconfig/syslog + # + # 2) configure local2 events to go to the /var/log/haproxy.log + # file. A line like the following can be added to + # /etc/sysconfig/syslog + # + # local2.* /var/log/haproxy.log + # + log 127.0.0.1 local2 + + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + + # turn on stats unix socket + stats socket /var/lib/haproxy/stats + +#--------------------------------------------------------------------- +# common defaults that all the 'listen' and 'backend' sections will +# use if not designated in their block +#--------------------------------------------------------------------- +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option forwardfor except 127.0.0.0/8 + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 + +#--------------------------------------------------------------------- +# main frontend which proxys to the backends +#--------------------------------------------------------------------- +frontend http-proxy *:8080 + acl url_healthz path_beg -i /healthz + use_backend ocp if url_healthz + +backend ocp + server ocp localhost:{{ internal_console_port }} ssl verify none diff --git a/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 new file mode 100644 index 000000000..5b0563724 --- /dev/null +++ b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 @@ -0,0 +1,7 @@ +#!/bin/bash + +set -euo pipefail + +oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig +gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig' +rm -f /root/bootstrap.kubeconfig diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh index 4d150bc74..794985322 100644 --- a/roles/openshift_gcp/templates/provision.j2.sh +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -9,15 +9,26 @@ if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then ssh-add "{{ openshift_gcp_ssh_private_key }}" || true fi - # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there - pub_key=$(cut -d ' ' 
-f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + # Check if the public key is in the project metadata, and if not, add it there + if [ -f "{{ openshift_gcp_ssh_private_key }}.pub" ]; then + pub_file="{{ openshift_gcp_ssh_private_key }}.pub" + pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + else + keyfile="${HOME}/.ssh/google_compute_engine" + pub_file="${keyfile}.pub" + mkdir -p "${HOME}/.ssh" + cp "{{ openshift_gcp_ssh_private_key }}" "${keyfile}" + chmod 0600 "${keyfile}" + ssh-keygen -y -f "${keyfile}" > "${pub_file}" + pub_key=$(cut -d ' ' -f 2 < "${pub_file}") + fi key_tmp_file='/tmp/ocp-gce-keys' if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" fi echo -n 'cloud-user:' >> "$key_tmp_file" - cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file" + cat "${pub_file}" >> "$key_tmp_file" gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" rm -f "$key_tmp_file" fi diff --git a/roles/openshift_gcp/templates/yum_repo.j2 b/roles/openshift_gcp/templates/yum_repo.j2 new file mode 100644 index 000000000..77919ea75 --- /dev/null +++ b/roles/openshift_gcp/templates/yum_repo.j2 @@ -0,0 +1,20 @@ +{% for repo in provision_custom_repositories %} +[{{ repo.id | default(repo.name) }}] +name={{ repo.name | default(repo.id) }} +baseurl={{ repo.baseurl }} +{% set enable_repo = repo.enabled | default(1) %} +enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }} +{% set enable_gpg_check = repo.gpgcheck | default(1) %} +gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }} +{% if 'sslclientcert' in repo %} +sslclientcert={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_cert" if repo.sslclientcert }} +{% endif %} +{% if 'sslclientkey' in repo %} +sslclientkey={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_key" if repo.sslclientkey }} +{% endif %} +{% for key, value in repo.iteritems() %} +{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck', 'sslclientkey', 'sslclientcert'] and value is defined %} +{{ key }}={{ value }} +{% endif %} +{% endfor %} +{% endfor %} diff --git a/roles/openshift_grafana/defaults/main.yml b/roles/openshift_grafana/defaults/main.yml new file mode 100644 index 000000000..7fd7a085d --- /dev/null +++ b/roles/openshift_grafana/defaults/main.yml @@ -0,0 +1,12 @@ +--- +gf_body_tmp: + name: grafana_name + type: prometheus + typeLogoUrl: '' + access: proxy + url: prometheus_url + basicAuth: false + withCredentials: false + jsonData: + tlsSkipVerify: true + token: satoken diff --git a/roles/openshift_grafana/files/grafana-ocp-oauth.yml b/roles/openshift_grafana/files/grafana-ocp-oauth.yml new file mode 100644 index 000000000..82fa89004 --- /dev/null +++ b/roles/openshift_grafana/files/grafana-ocp-oauth.yml @@ -0,0 +1,661 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: grafana-ocp + annotations: + "openshift.io/display-name": Grafana ocp + description: | + Grafana server with patched Prometheus datasource. 
+ iconClass: icon-cogs + tags: "metrics,monitoring,grafana,prometheus" +parameters: +- description: The location of the proxy image + name: IMAGE_GF + value: mrsiano/grafana-ocp:latest +- description: The location of the proxy image + name: IMAGE_PROXY + value: openshift/oauth-proxy:v1.0.0 +- description: External URL for the grafana route + name: ROUTE_URL + value: "" +- description: The namespace to instantiate heapster under. Defaults to 'grafana'. + name: NAMESPACE + value: grafana +- description: The session secret for the proxy + name: SESSION_SECRET + generate: expression + from: "[a-zA-Z0-9]{43}" +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + annotations: + serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana-ocp"}}' +- apiVersion: authorization.openshift.io/v1 + kind: ClusterRoleBinding + metadata: + name: gf-cluster-reader + roleRef: + name: cluster-reader + subjects: + - kind: ServiceAccount + name: grafana-ocp + namespace: "${NAMESPACE}" +- apiVersion: route.openshift.io/v1 + kind: Route + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + spec: + host: "${ROUTE_URL}" + to: + name: grafana-ocp + tls: + termination: Reencrypt +- apiVersion: v1 + kind: Service + metadata: + name: grafana-ocp + annotations: + prometheus.io/scrape: "true" + prometheus.io/scheme: https + service.alpha.openshift.io/serving-cert-secret-name: gf-tls + namespace: "${NAMESPACE}" + labels: + metrics-infra: grafana-ocp + name: grafana-ocp + spec: + ports: + - name: grafana-ocp + port: 443 + protocol: TCP + targetPort: 8443 + selector: + app: grafana-ocp +- apiVersion: v1 + kind: Secret + metadata: + name: gf-proxy + namespace: "${NAMESPACE}" + stringData: + session_secret: "${SESSION_SECRET}=" +# Deploy Prometheus behind an oauth proxy +- apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + labels: + app: grafana-ocp + name: grafana-ocp + namespace: "${NAMESPACE}" + spec: + replicas: 1 + selector: + matchLabels: + app: grafana-ocp + template: + metadata: + labels: + app: grafana-ocp + name: grafana-ocp-app + spec: + serviceAccountName: grafana-ocp + containers: + - name: oauth-proxy + image: ${IMAGE_PROXY} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8443 + name: web + args: + - -https-address=:8443 + - -http-address= + - -email-domain=* + - -client-id=system:serviceaccount:${NAMESPACE}:grafana-ocp + - -upstream=http://localhost:3000 + - -provider=openshift +# - '-openshift-delegate-urls={"/api/datasources": {"resource": "namespace", "verb": "get", "resourceName": "grafana-ocp", "namespace": "${NAMESPACE}"}}' + - '-openshift-sar={"namespace": "${NAMESPACE}", "verb": "list", "resource": "services"}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -skip-auth-regex=^/metrics,/api/datasources,/api/dashboards + volumeMounts: + - mountPath: /etc/tls/private + name: gf-tls + - mountPath: /etc/proxy/secrets + name: secrets + + - name: grafana-ocp + image: ${IMAGE_GF} + ports: + - name: grafana-http + containerPort: 3000 + volumeMounts: + - mountPath: "/root/go/src/github.com/grafana/grafana/data" + name: gf-data + - mountPath: "/root/go/src/github.com/grafana/grafana/conf" + name: gfconfig + - mountPath: /etc/tls/private + name: gf-tls + - 
mountPath: /etc/proxy/secrets + name: secrets + command: + - "./bin/grafana-server" + + volumes: + - name: gfconfig + configMap: + name: gf-config + - name: secrets + secret: + secretName: gf-proxy + - name: gf-tls + secret: + secretName: gf-tls + - emptyDir: {} + name: gf-data +- apiVersion: v1 + kind: ConfigMap + metadata: + name: gf-config + namespace: "${NAMESPACE}" + data: + defaults.ini: |- + ##################### Grafana Configuration Defaults ##################### + # + # Do not modify this file in grafana installs + # + + # possible values : production, development + app_mode = production + + # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty + instance_name = ${HOSTNAME} + + #################################### Paths ############################### + [paths] + # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) + # + data = data + # + # Directory where grafana can store logs + # + logs = data/log + # + # Directory where grafana will automatically scan and look for plugins + # + plugins = data/plugins + + #################################### Server ############################## + [server] + # Protocol (http, https, socket) + protocol = http + + # The ip address to bind to, empty will bind to all interfaces + http_addr = + + # The http port to use + http_port = 3000 + + # The public facing domain name used to access grafana from a browser + domain = localhost + + # Redirect to correct domain if host header does not match domain + # Prevents DNS rebinding attacks + enforce_domain = false + + # The full public facing url + root_url = %(protocol)s://%(domain)s:%(http_port)s/ + + # Log web requests + router_logging = false + + # the path relative working path + static_root_path = public + + # enable gzip + enable_gzip = false + + # https certs & key file + cert_file = /etc/tls/private/tls.crt + cert_key = /etc/tls/private/tls.key + + # Unix socket path + socket = /tmp/grafana.sock + + #################################### Database ############################ + [database] + # You can configure the database connection by specifying type, host, name, user and password + # as separate properties or as on string using the url property. + + # Either "mysql", "postgres" or "sqlite3", it's your choice + type = sqlite3 + host = 127.0.0.1:3306 + name = grafana + user = root + # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;""" + password = + # Use either URL or the previous fields to configure the database + # Example: mysql://user:secret@host:port/database + url = + + # Max idle conn setting default is 2 + max_idle_conn = 2 + + # Max conn setting default is 0 (mean not set) + max_open_conn = + + # For "postgres", use either "disable", "require" or "verify-full" + # For "mysql", use either "true", "false", or "skip-verify". + ssl_mode = disable + + ca_cert_path = + client_key_path = + client_cert_path = + server_cert_name = + + # For "sqlite3" only, path relative to data_path setting + path = grafana.db + + #################################### Session ############################# + [session] + # Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file" + provider = file + + # Provider config options + # memory: not have any config yet + # file: session dir path, is relative to grafana data_path + # redis: config like redis server e.g. 
`addr=127.0.0.1:6379,pool_size=100,db=grafana` + # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable + # mysql: go-sql-driver/mysql dsn config string, examples: + # `user:password@tcp(127.0.0.1:3306)/database_name` + # `user:password@unix(/var/run/mysqld/mysqld.sock)/database_name` + # memcache: 127.0.0.1:11211 + + + provider_config = sessions + + # Session cookie name + cookie_name = grafana_sess + + # If you use session in https only, default is false + cookie_secure = false + + # Session life time, default is 86400 + session_life_time = 86400 + gc_interval_time = 86400 + + #################################### Data proxy ########################### + [dataproxy] + + # This enables data proxy logging, default is false + logging = false + + #################################### Analytics ########################### + [analytics] + # Server reporting, sends usage counters to stats.grafana.org every 24 hours. + # No ip addresses are being tracked, only simple counters to track + # running instances, dashboard and error counts. It is very helpful to us. + # Change this option to false to disable reporting. + reporting_enabled = true + + # Set to false to disable all checks to https://grafana.com + # for new versions (grafana itself and plugins), check is used + # in some UI views to notify that grafana or plugin update exists + # This option does not cause any auto updates, nor send any information + # only a GET request to https://grafana.com to get latest versions + check_for_updates = true + + # Google Analytics universal tracking code, only enabled if you specify an id here + google_analytics_ua_id = + + # Google Tag Manager ID, only enabled if you specify an id here + google_tag_manager_id = + + #################################### Security ############################ + [security] + # default admin user, created on startup + admin_user = admin + + # default admin password, can be changed before first start of grafana, or in profile settings + admin_password = admin + + # used for signing + secret_key = SW2YcwTIb9zpOOhoPsMm + + # Auto-login remember days + login_remember_days = 7 + cookie_username = grafana_user + cookie_remember_name = grafana_remember + + # disable gravatar profile images + disable_gravatar = false + + # data source proxy whitelist (ip_or_domain:port separated by spaces) + data_source_proxy_whitelist = + + [snapshots] + # snapshot sharing options + external_enabled = true + external_snapshot_url = https://snapshots-origin.raintank.io + external_snapshot_name = Publish to snapshot.raintank.io + + # remove expired snapshot + snapshot_remove_expired = true + + # remove snapshots after 90 days + snapshot_TTL_days = 90 + + #################################### Users #################################### + [users] + # disable user signup / registration + allow_sign_up = true + + # Allow non admin users to create organizations + allow_org_create = true + + # Set to true to automatically assign new users to the default organization (id 1) + auto_assign_org = true + + # Default role new users will be automatically assigned (if auto_assign_org above is set to true) + auto_assign_org_role = Admin + + # Require email validation before sign up completes + verify_email_enabled = false + + # Background text for the user field on the login page + login_hint = email or username + + # Default UI theme ("dark" or "light") + default_theme = dark + + # External user management + external_manage_link_url = + external_manage_link_name = + external_manage_info = + + 
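# Editorial annotation (not in the upstream defaults.ini): the login form and signout
# menu are disabled in [auth] below because authentication is delegated to the
# oauth-proxy sidecar defined earlier in this template, which passes the authenticated
# user to Grafana via the X-WEBAUTH-USER header consumed by [auth.proxy].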
[auth] + # Set to true to disable (hide) the login form, useful if you use OAuth + disable_login_form = true + + # Set to true to disable the signout link in the side menu. useful if you use auth.proxy + disable_signout_menu = true + + #################################### Anonymous Auth ###################### + [auth.anonymous] + # enable anonymous access + enabled = true + + # specify organization name that should be used for unauthenticated users + org_name = Main Org. + + # specify role for unauthenticated users + org_role = Admin + + #################################### Github Auth ######################### + [auth.github] + enabled = false + allow_sign_up = true + client_id = some_id + client_secret = some_secret + scopes = user:email + auth_url = https://github.com/login/oauth/authorize + token_url = https://github.com/login/oauth/access_token + api_url = https://api.github.com/user + team_ids = + allowed_organizations = + + #################################### Google Auth ######################### + [auth.google] + enabled = false + allow_sign_up = true + client_id = some_client_id + client_secret = some_client_secret + scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email + auth_url = https://accounts.google.com/o/oauth2/auth + token_url = https://accounts.google.com/o/oauth2/token + api_url = https://www.googleapis.com/oauth2/v1/userinfo + allowed_domains = + hosted_domain = + + #################################### Grafana.com Auth #################### + # legacy key names (so they work in env variables) + [auth.grafananet] + enabled = false + allow_sign_up = true + client_id = some_id + client_secret = some_secret + scopes = user:email + allowed_organizations = + + [auth.grafana_com] + enabled = false + allow_sign_up = true + client_id = some_id + client_secret = some_secret + scopes = user:email + allowed_organizations = + + #################################### Generic OAuth ####################### + [auth.generic_oauth] + name = OAuth + enabled = false + allow_sign_up = true + client_id = some_id + client_secret = some_secret + scopes = user:email + auth_url = + token_url = + api_url = + team_ids = + allowed_organizations = + + #################################### Basic Auth ########################## + [auth.basic] + enabled = false + + #################################### Auth Proxy ########################## + [auth.proxy] + enabled = true + header_name = X-WEBAUTH-USER + header_property = username + auto_sign_up = true + ldap_sync_ttl = 60 + whitelist = + + #################################### Auth LDAP ########################### + [auth.ldap] + enabled = false + config_file = /etc/grafana/ldap.toml + allow_sign_up = true + + #################################### SMTP / Emailing ##################### + [smtp] + enabled = false + host = localhost:25 + user = + # If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;""" + password = + cert_file = + key_file = + skip_verify = false + from_address = admin@grafana.localhost + from_name = Grafana + ehlo_identity = + + [emails] + welcome_email_on_sign_up = false + templates_pattern = emails/*.html + + #################################### Logging ########################## + [log] + # Either "console", "file", "syslog". Default is console and file + # Use space to separate multiple modes, e.g. 
"console file" + mode = console file + + # Either "debug", "info", "warn", "error", "critical", default is "info" + level = error + + # optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug + filters = + + # For "console" mode only + [log.console] + level = + + # log line format, valid options are text, console and json + format = console + + # For "file" mode only + [log.file] + level = + + # log line format, valid options are text, console and json + format = text + + # This enables automated log rotate(switch of following options), default is true + log_rotate = true + + # Max line number of single file, default is 1000000 + max_lines = 1000000 + + # Max size shift of single file, default is 28 means 1 << 28, 256MB + max_size_shift = 28 + + # Segment log daily, default is true + daily_rotate = true + + # Expired days of log file(delete after max days), default is 7 + max_days = 7 + + [log.syslog] + level = + + # log line format, valid options are text, console and json + format = text + + # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. + network = + address = + + # Syslog facility. user, daemon and local0 through local7 are valid. + facility = + + # Syslog tag. By default, the process' argv[0] is used. + tag = + + + #################################### AMQP Event Publisher ################ + [event_publisher] + enabled = false + rabbitmq_url = amqp://localhost/ + exchange = grafana_events + + #################################### Dashboard JSON files ################ + [dashboards.json] + enabled = false + path = /var/lib/grafana/dashboards + + #################################### Usage Quotas ######################## + [quota] + enabled = false + + #### set quotas to -1 to make unlimited. #### + # limit number of users per Org. + org_user = 10 + + # limit number of dashboards per Org. + org_dashboard = 100 + + # limit number of data_sources per Org. + org_data_source = 10 + + # limit number of api_keys per Org. + org_api_key = 10 + + # limit number of orgs a user can create. + user_org = 10 + + # Global limit of users. + global_user = -1 + + # global limit of orgs. + global_org = -1 + + # global limit of dashboards + global_dashboard = -1 + + # global limit of api_keys + global_api_key = -1 + + # global limit on number of logged in users. + global_session = -1 + + #################################### Alerting ############################ + [alerting] + # Disable alerting engine & UI features + enabled = true + # Makes it possible to turn off alert rule execution but alerting UI is visible + execute_alerts = true + + #################################### Internal Grafana Metrics ############ + # Metrics available at HTTP API Url /api/metrics + [metrics] + enabled = true + interval_seconds = 10 + + # Send internal Grafana metrics to graphite + [metrics.graphite] + # Enable by setting the address setting (ex localhost:2003) + address = + prefix = prod.grafana.%(instance_name)s. + + [grafana_net] + url = https://grafana.com + + [grafana_com] + url = https://grafana.com + + #################################### Distributed tracing ############ + [tracing.jaeger] + # jaeger destination (ex localhost:6831) + address = + # tag that will always be included in when creating new spans. 
ex (tag1:value1,tag2:value2) + always_included_tag = + # Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote + sampler_type = const + # jaeger samplerconfig param + # for "const" sampler, 0 or 1 for always false/true respectively + # for "probabilistic" sampler, a probability between 0 and 1 + # for "rateLimiting" sampler, the number of spans per second + # for "remote" sampler, param is the same as for "probabilistic" + # and indicates the initial sampling rate before the actual one + # is received from the mothership + sampler_param = 1 + + #################################### External Image Storage ############## + [external_image_storage] + # You can choose between (s3, webdav, gcs) + provider = + + [external_image_storage.s3] + bucket_url = + bucket = + region = + path = + access_key = + secret_key = + + [external_image_storage.webdav] + url = + username = + password = + public_url = + + [external_image_storage.gcs] + key_file = + bucket = diff --git a/roles/openshift_grafana/files/grafana-ocp.yml b/roles/openshift_grafana/files/grafana-ocp.yml new file mode 100644 index 000000000..bc7b4b286 --- /dev/null +++ b/roles/openshift_grafana/files/grafana-ocp.yml @@ -0,0 +1,76 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: grafana-ocp + annotations: + "openshift.io/display-name": Grafana ocp + description: | + Grafana server with patched Prometheus datasource. + iconClass: icon-cogs + tags: "metrics,monitoring,grafana,prometheus" +parameters: +- description: External URL for the grafana route + name: ROUTE_URL + value: "" +- description: The namespace to instantiate heapster under. Defaults to 'grafana'. + name: NAMESPACE + value: grafana +objects: +- apiVersion: route.openshift.io/v1 + kind: Route + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + spec: + host: "${ROUTE_URL}" + to: + name: grafana-ocp +- apiVersion: v1 + kind: Service + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + labels: + metrics-infra: grafana-ocp + name: grafana-ocp + spec: + selector: + name: grafana-ocp + ports: + - port: 8082 + protocol: TCP + targetPort: grafana-http +- apiVersion: v1 + kind: ReplicationController + metadata: + name: grafana-ocp + namespace: "${NAMESPACE}" + labels: + metrics-infra: grafana-ocp + name: grafana-ocp + spec: + selector: + name: grafana-ocp + replicas: 1 + template: + version: v1 + metadata: + labels: + metrics-infra: grafana-ocp + name: grafana-ocp + spec: + volumes: + - name: data + emptyDir: {} + containers: + - image: "mrsiano/grafana-ocp:latest" + name: grafana-ocp + ports: + - name: grafana-http + containerPort: 3000 + volumeMounts: + - name: data + mountPath: "/root/go/src/github.com/grafana/grafana/data" + command: + - "./bin/grafana-server" diff --git a/roles/openshift_grafana/files/openshift-cluster-monitoring.json b/roles/openshift_grafana/files/openshift-cluster-monitoring.json new file mode 100644 index 000000000..f59ca997f --- /dev/null +++ b/roles/openshift_grafana/files/openshift-cluster-monitoring.json @@ -0,0 +1,5138 @@ +{ + "dashboard": { + "description": "Monitors Openshift cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. 
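
Note on the grafana-ocp.yml template added above: the hunk only defines the Route, Service and ReplicationController objects; it does not show the template being processed and applied. The task below is a minimal sketch of one way a playbook could do that, assuming the `oc` client is on the PATH, that the `grafana` project already exists, and that ROUTE_URL is left at its empty default so the router generates the host. The task name and the changed_when heuristic are illustrative, not the role's actual implementation.

    # Process the template with its defaults (NAMESPACE=grafana, ROUTE_URL="")
    # and feed the resulting object list straight into `oc apply`.
    - name: Instantiate the grafana-ocp template
      shell: >
        oc process -f roles/openshift_grafana/files/grafana-ocp.yml -p NAMESPACE=grafana
        | oc apply -n grafana -f -
      register: grafana_ocp_apply
      changed_when: "'created' in grafana_ocp_apply.stdout or 'configured' in grafana_ocp_apply.stdout"

The dashboard JSON that follows in the next hunk is what such a deployment would eventually serve.
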
Uses cAdvisor metrics only.", + "editable": true, + "gnetId": 315, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "rows": [ + { + "collapse": false, + "height": "200px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "height": "200px", + "id": 32, + "legend": { + "alignAsTable": false, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))", + "format": "time_series", + "instant": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Received", + "metric": "network", + "refId": "A", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Sent", + "metric": "network", + "refId": "B", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network I/O pressure", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Network I/O pressure", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "180px", + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + 
"interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Cluster memory usage", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "180px", + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Cluster CPU usage ", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "180px", + "id": 7, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Cluster filesystem usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + 
"cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 9, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "20%", + "prefix": "", + "prefixFontSize": "20%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 10, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 11, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": 
"connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "30%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m]))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 12, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "30%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 13, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + 
"title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "1px", + "id": 14, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Total usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 33, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) ", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "overall cpu usage", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Cluster CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "height": "", + "id": 17, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name) * 100", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "metric": "container_cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Pods CPU usage ", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": "% Usage", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Pods CPU usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "height": "", + "id": 24, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", + "metric": "container_cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Containers Cores Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Containers CPU usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "height": "", + "id": 23, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "System services CPU usage ", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "System services CPU usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 411, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 34, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (irate (container_memory_usage_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "All processes Memory usage ", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "All processes CPU usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 25, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Pods memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Pods memory usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 26, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_rss{systemd_service_name=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ systemd_service_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "System services memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": 
"bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "System services memory usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 27, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}) by (container_name, pod_name)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + }, + { + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "container_memory_usage:sort_desc", + "refId": "B", + "step": 10 + }, + { + "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Containers memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Containers memory usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "500px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 28, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": 
"current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "All processes memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "All processes memory usage", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 30, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}", + "metric": "network", + "refId": "B", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}", + "metric": "network", + "refId": "D", + "step": 1 + }, + { + "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "network", + "refId": "A", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": 
"1s", + "intervalFactor": 1, + "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "network", + "refId": "C", + "step": 1 + }, + { + "expr": "sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "network", + "refId": "E", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "hide": false, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "network", + "refId": "F", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Containers network I/O ", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Containers network I/O", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 277, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 16, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> {{ pod_name }}", + "metric": "network", + "refId": "A", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "<- {{ pod_name }}", + "metric": "network", + "refId": "B", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Pods network I/O ", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Pods network I/O", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "500px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 29, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)", + "format": "time_series", + "instant": true, + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "-> {{ id }}", + "metric": "network", + "refId": "A", + "step": 1 + }, + { + "expr": "- sum (irate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "<- {{ id }}", + "metric": "network", + "refId": "B", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "All processes network I/O ", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "All processes network I/O", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 35, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_build_total) by (phase,reason)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ phase }} | {{ reason }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_build_total", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, 
+ "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 54, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "count(openshift_build_active_time_seconds{phase=\"running\"} offset 10m)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of builds that have been running for more than 10 minutes (600 seconds).", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 55, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "count(openshift_build_active_time_seconds{phase=\"pending\"} offset 10m)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of build that have been waiting at least 10 minutes (600 seconds) to start.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 56, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_build_total{phase=\"Failed\"})", 
+ "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of failed builds, regardless of the failure reason.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 57, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_build_total{phase=\"Failed\",reason=\"FetchSourceFailed\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ instance }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of failed builds because of problems retrieving source from the associated Git repository.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_build_total{phase=\"Complete\"})", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the number of successfully completed builds.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 0, + "id": 59, + "legend": { + "alignAsTable": true, + "avg": 
true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_build_total{phase=\"Failed\"} offset 5m", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ reason }}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns the failed builds totals, per failure reason, from 5 minutes ago.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OpenShift Builds", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 36, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_sdn_pod_setup_latency_sum)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_pod_setup_latency_sum", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 41, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(openshift_sdn_pod_teardown_latency{quantile=\"0.9\"}) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_pod_teardown_latency", + "tooltip": { + "shared": true, + "sort": 0, 
+ "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 50, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(10, (sum by (pod_name) (irate(container_network_receive_bytes_total{pod_name!=\"\"}[5m]))))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Top 10 pods doing the most receive network traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 37, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_sdn_pod_ips", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ instance }} | {{ role }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_pod_ips", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 39, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": 
false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 42, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_sdn_arp_cache_entries", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ role }} | {{ instance }}", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_arp_cache_entries", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 40, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "openshift_sdn_arp_cache_entries", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "openshift_sdn_arp_cache_entries", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + 
"repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OpenShift SDN", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 44, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(kubelet_pleg_relist_latency_microseconds{kubernetes_io_hostname=~\"$Node\",quantile=\"0.9\"}[2m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ role }} | {{ instance }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "kubelet_pleg_relist", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 51, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}[2m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ operation_type }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 52, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": 
[], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "kubelet_docker_operations_timeout", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ operation_type }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns a running count (not a rate) of docker operations that have timed out since the kubelet was started.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 53, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "kubelet_docker_operations_errors", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ operation_type }}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Returns a running count (not a rate) of docker operations that have failed since the kubelet was started.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Kubelet", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 46, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(scrape_samples_scraped[2m])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{ kubernetes_name }} | {{ instance }} ", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "scrape_samples_scraped", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 68, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum without (cpu) (irate(container_cpu_usage_seconds_total{container_name=\"prometheus\"}[5m])))", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU per instance of Prometheus container.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Prometheus", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 48, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb!~\"GET|LIST|WATCH\"}[2m]))) > 0", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ resource }} || {{ verb }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Number of mutating API requests being made to the control plane.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 49, + "legend": { + "alignAsTable": true, + "avg": true, + 
"current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb=~\"GET|LIST|WATCH\"}[2m]))) > 0", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ resource }} || {{ pod }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Number of non-mutating API requests being made to the control plane.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 74, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "endpoint_queue_latency", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": " quantile {{ quantile }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "endpoint_queue_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "API Server", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 61, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "etcd_disk_wal_fsync_duration_seconds_count", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, 
+ "title": "etcd_disk_wal_fsync_duration_seconds_count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "etcd", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 62, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(changes(container_start_time_seconds[10m]))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "The number of containers that start or restart over the last ten minutes.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Changes in your cluster", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 63, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(machine_cpu_cores)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total number of cores in the cluster.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "${DS_PR}", + "fill": 1, + "id": 64, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(sort_desc(irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total number of consumed cores.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 65, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum by (kubernetes_io_hostname,type) (irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU consumed per node in the cluster.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 66, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum by (cpu,id,pod_name,container_name) (irate(container_cpu_usage_seconds_total{role=\"infra\"}[5m])))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU consumption per system service or container on the infrastructure nodes.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + 
"buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 67, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum by (namespace) (irate(container_cpu_usage_seconds_total[5m])))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU consumed per namespace on the cluster.", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 47, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_cpu_usage_seconds_total{id=\"/\"}[3m])) / sum(machine_cpu_cores)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Percentage of total cluster CPU in use", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 69, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss) / sum(machine_memory_bytes)", + "format": "time_series", + 
"intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Percentage of total cluster memory in use", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 70, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (kubernetes_io_hostname) (irate(container_cpu_usage_seconds_total{id=~\"/system.slice/(docker|etcd).service\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Aggregate CPU usage (seconds total) of etcd+docker", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "System and container CPU", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 71, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "Kubernetes Storage Metrics via Prometheus", + "type": "absolute", + "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g" + } + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "volumes_queue_latency", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "volumes_queue_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] 
+ }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 72, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "Kubernetes Storage Metrics via Prometheus", + "type": "absolute", + "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g" + } + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(cloudprovider_gce_api_request_duration_seconds_count[2m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ request }}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "cloudprovider_aws_api_request_duration_seconds_count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PR}", + "fill": 1, + "id": 73, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "Kubernetes Storage Metrics via Prometheus", + "type": "absolute", + "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g" + } + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate(storage_operation_duration_seconds_sum{kubernetes_io_hostname=~\"$Node\"}[2m])) by (operation_name,kubernetes_io_hostname)", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "{{ operation_name }} || {{ kubernetes_io_hostname }}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "storage_operation_duration_seconds_sum", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OpenShift Volumes", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes", + "openshift" + ], + "templating": { + "list": [ + { + 
"allValue": ".*", + "current": {}, + "datasource": "${DS_PR}", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "Node", + "options": [], + "query": "label_values(kubernetes_io_hostname)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "1s", + "2m", + "20s", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "openshift cluster monitoring", + "version": 6 + } +} diff --git a/roles/openshift_grafana/meta/main.yml b/roles/openshift_grafana/meta/main.yml new file mode 100644 index 000000000..8dea6f197 --- /dev/null +++ b/roles/openshift_grafana/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Eldad Marciano + description: Setup grafana pod + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.3 + platforms: + - name: EL + versions: + - 7 + categories: + - metrics diff --git a/roles/openshift_grafana/tasks/gf-permissions.yml b/roles/openshift_grafana/tasks/gf-permissions.yml new file mode 100644 index 000000000..9d3c741ee --- /dev/null +++ b/roles/openshift_grafana/tasks/gf-permissions.yml @@ -0,0 +1,12 @@ +--- +- name: Create gf user on htpasswd + command: htpasswd -c /etc/origin/master/htpasswd gfadmin + +- name: Make sure master config use HTPasswdPasswordIdentityProvider + command: "sed -ie 's|AllowAllPasswordIdentityProvider|HTPasswdPasswordIdentityProvider\n file: /etc/origin/master/htpasswd|' /etc/origin/master/master-config.yaml" + +- name: Grant permission for gfuser + command: oc adm policy add-cluster-role-to-user cluster-reader gfadmin + +- name: Restart mater api + command: systemctl restart atomic-openshift-master-api.service diff --git a/roles/openshift_grafana/tasks/main.yml b/roles/openshift_grafana/tasks/main.yml new file mode 100644 index 000000000..6a06d40a9 --- /dev/null +++ b/roles/openshift_grafana/tasks/main.yml @@ -0,0 +1,122 @@ +--- +- name: Create grafana namespace + oc_project: + state: present + name: grafana + +- name: Configure Grafana Permissions + include_tasks: tasks/gf-permissions.yml + when: gf_oauth | default(false) | bool == true + +# TODO: we should grab this yaml file from openshift/origin +- name: Templatize grafana yaml + template: src=grafana-ocp.yaml dest=/tmp/grafana-ocp.yaml + register: + cl_file: /tmp/grafana-ocp.yaml + when: gf_oauth | default(false) | bool == false + +# TODO: we should grab this yaml file from openshift/origin +- name: Templatize grafana yaml + template: src=grafana-ocp-oauth.yaml dest=/tmp/grafana-ocp-oauth.yaml + register: + cl_file: /tmp/grafana-ocp-oauth.yaml + when: gf_oauth | default(false) | bool == true + +- name: Process the grafana file + oc_process: + namespace: grafana + template_name: "{{ cl_file }}" + create: True + when: gf_oauth | default(false) | bool == true + +- name: Wait to grafana be running + command: oc rollout status deployment/grafana-ocp + +- name: oc adm policy add-role-to-user view -z grafana-ocp -n {{ gf_prometheus_namespace }} + oc_adm_policy_user: + user: grafana-ocp + resource_kind: cluster-role + resource_name: view + state: present + role_namespace: "{{ gf_prometheus_namespace }}" + +- name: Get grafana route + oc_obj: + kind: route + name: grafana + namespace: grafana + 
register: grafana_route + +- name: Get prometheus route + oc_obj: + kind: route + name: prometheus + namespace: "{{ gf_prometheus_namespace }}" + register: prometheus_route + +- name: Get the prometheus SA + oc_serviceaccount_secret: + state: list + service_account: prometheus + namespace: "{{ gf_prometheus_namespace }}" + register: sa + +- name: Get the management SA bearer token + set_fact: + management_token: "{{ sa.results | oo_filter_sa_secrets }}" + +- name: Ensure the SA bearer token value is read + oc_secret: + state: list + name: "{{ management_token }}" + namespace: "{{ gf_prometheus_namespace }}" + no_log: True + register: sa_secret + +- name: Get the SA bearer token for prometheus + set_fact: + token: "{{ sa_secret.results.encoded.token }}" + +- name: Convert to json + set_fact: + ds_json: "{{ gf_body_tmp | to_json }}" + +- name: Set protocol type + set_fact: + protocol: "{{ 'https' if gf_oauth | default(false) | bool else 'http' }}" + +- name: Add gf datasource + uri: + url: "{{ protocol }}://{{ grafana_route }}/api/datasources" + user: admin + password: admin + method: POST + body: "{{ ds_json | regex_replace('grafana_name', gf_datasource_name) | regex_replace('prometheus_url', 'https://' ~ prometheus_route) | regex_replace('satoken', token) }}" + headers: + Content-Type: "application/json" + register: add_ds + +- name: Regex setup ds name + replace: + path: "{{ role_path }}/files/openshift-cluster-monitoring.json" + regexp: '\$\{DS_PR\}' + replace: '{{ gf_datasource_name }}' + backup: yes + +- name: Add new dashboard + uri: + url: "{{ protocol }}://{{ grafana_route }}/api/dashboards/db" + user: admin + password: admin + method: POST + body: "{{ lookup('file', 'openshift-cluster-monitoring.json') }}" + headers: + Content-Type: "application/json" + register: add_ds + +- name: Regex json tear down + replace: + path: "{{ role_path }}/files/openshift-cluster-monitoring.json" + regexp: '{{ gf_datasource_name }}' + replace: '${DS_PR}' + backup: yes diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py index 83e551b5d..b9c41d1b4 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -5,6 +5,7 @@ Health checks for OpenShift clusters.
import json import operator import os +import re import time import collections @@ -309,28 +310,38 @@ class OpenShiftCheck(object): name_list = name_list.split(',') return [name.strip() for name in name_list if name.strip()] - @staticmethod - def get_major_minor_version(openshift_image_tag): + def get_major_minor_version(self, openshift_image_tag=None): """Parse and return the deployed version of OpenShift as a tuple.""" - if openshift_image_tag and openshift_image_tag[0] == 'v': - openshift_image_tag = openshift_image_tag[1:] - # map major release versions across releases - # to a common major version - openshift_major_release_version = { - "1": "3", - } + version = openshift_image_tag or self.get_var("openshift_image_tag") + components = [int(component) for component in re.findall(r'\d+', version)] - components = openshift_image_tag.split(".") - if not components or len(components) < 2: + if len(components) < 2: msg = "An invalid version of OpenShift was found for this host: {}" - raise OpenShiftCheckException(msg.format(openshift_image_tag)) + raise OpenShiftCheckException(msg.format(version)) + + # map major release version across releases to OCP major version + components[0] = {1: 3}.get(components[0], components[0]) + + return tuple(int(x) for x in components[:2]) + + def get_required_version(self, name, version_map): + """Return the correct required version(s) for the current (or nearest) OpenShift version.""" + openshift_version = self.get_major_minor_version() + + earliest = min(version_map) + latest = max(version_map) + if openshift_version < earliest: + return version_map[earliest] + if openshift_version > latest: + return version_map[latest] - if components[0] in openshift_major_release_version: - components[0] = openshift_major_release_version[components[0]] + required_version = version_map.get(openshift_version) + if not required_version: + msg = "There is no recommended version of {} for the current version of OpenShift ({})" + raise OpenShiftCheckException(msg.format(name, ".".join(str(comp) for comp in openshift_version))) - components = tuple(int(x) for x in components[:2]) - return components + return required_version def find_ansible_mount(self, path): """Return the mount point for path from ansible_mounts.""" diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 7afb8f730..145b82491 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -40,7 +40,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # to look for images available remotely without waiting to pull them. 
dependencies = ["python-docker-py", "skopeo"] # command for checking if remote registries have an image, without docker pull - skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}" + skopeo_command = "{proxyvars} timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}" skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>" def __init__(self, *args, **kwargs): @@ -56,7 +56,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # ordered list of registries (according to inventory vars) that docker will try for unscoped images regs = self.ensure_list("openshift_docker_additional_registries") # currently one of these registries is added whether the user wants it or not. - deployment_type = self.get_var("openshift_deployment_type") + deployment_type = self.get_var("openshift_deployment_type", default="") if deployment_type == "origin" and "docker.io" not in regs: regs.append("docker.io") elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs: @@ -76,11 +76,20 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): if oreg_auth_user != '' and oreg_auth_password != '': oreg_auth_user = self.template_var(oreg_auth_user) oreg_auth_password = self.template_var(oreg_auth_password) - self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password)) + self.skopeo_command_creds = quote("--creds={}:{}".format(oreg_auth_user, oreg_auth_password)) # record whether we could reach a registry or not (and remember results) self.reachable_registries = {} + # take note of any proxy settings needed + proxies = [] + for var in ['http_proxy', 'https_proxy', 'no_proxy']: + # ansible vars are openshift_http_proxy, openshift_https_proxy, openshift_no_proxy + value = self.get_var("openshift_" + var, default=None) + if value: + proxies.append(var.upper() + "=" + quote(self.template_var(value))) + self.skopeo_proxy_vars = " ".join(proxies) + def is_active(self): """Skip hosts with unsupported deployment types.""" deployment_type = self.get_var("openshift_deployment_type") @@ -162,16 +171,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): required.add(self._registry_console_image(image_tag, image_info)) # images for containerized components - if self.get_var("openshift_is_containerized"): - components = set() + def add_var_or_default_img(var_name, comp_name): + """Returns: default image from comp_name, overridden by var_name in task_vars""" + default = "{}/{}:{}".format(image_info["namespace"], comp_name, image_tag) + required.add(self.template_var(self.get_var(var_name, default=default))) + + if self.get_var("openshift_is_containerized", convert=bool): if 'oo_nodes_to_config' in host_groups: - components.update(["node", "openvswitch"]) + add_var_or_default_img("osn_image", "node") + add_var_or_default_img("osn_ovs_image", "openvswitch") if 'oo_masters_to_config' in host_groups: # name is "origin" or "ose" - components.add(image_info["name"]) - for component in components: - required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag)) - if 'oo_etcd_to_config' in host_groups: # special case, note it is the same for origin/enterprise - required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag + add_var_or_default_img("osm_image", image_info["name"]) + if 'oo_etcd_to_config' in host_groups: + # special case, note default is the same for 
origin/enterprise and has no image tag + etcd_img = self.get_var("osm_etcd_image", default="registry.access.redhat.com/rhel7/etcd") + required.add(self.template_var(etcd_img)) return required @@ -249,11 +263,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): if not self.reachable_registries[registry]: continue # do not keep trying unreachable registries - args = dict(registry=registry, image=image) - args["tls"] = "false" if registry in self.registries["insecure"] else "true" - args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else "" + args = dict( + proxyvars=self.skopeo_proxy_vars, + tls="false" if registry in self.registries["insecure"] else "true", + creds=self.skopeo_command_creds if registry == self.registries["oreg"] else "", + registry=quote(registry), + image=quote(image), + ) - result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)}) + result = self.execute_module_with_retries("command", { + "_uses_shell": True, + "_raw_params": self.skopeo_command.format(**args), + }) if result.get("rc", 0) == 0 and not result.get("failed"): return True if result.get("rc") == 124: # RC 124 == timed out; mark unreachable @@ -263,6 +284,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): def connect_to_registry(self, registry): """Use ansible wait_for module to test connectivity from host to registry. Returns bool.""" + if self.skopeo_proxy_vars != "": + # assume we can't connect directly; just waive the test + return True + # test a simple TCP connection host, _, port = registry.partition(":") port = port or 443 diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py index 986a01f38..7f8c6ebdc 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py +++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py @@ -170,7 +170,7 @@ class Elasticsearch(LoggingCheck): """ errors = [] for pod_name in pods_by_name.keys(): - df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name) + df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name) disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json') lines = disk_output.splitlines() # expecting one header looking like 'IUse% Use%' and one body line diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py index 3b1cf8baa..16ec3a7f6 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py +++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py @@ -5,12 +5,11 @@ Module for performing checks on a Kibana logging deployment import json import ssl -try: - from urllib2 import HTTPError, URLError - import urllib2 -except ImportError: - from urllib.error import HTTPError, URLError - import urllib.request as urllib2 +# pylint can't find the package when its installed in virtualenv +# pylint: disable=import-error,no-name-in-module +from ansible.module_utils.six.moves.urllib import request +# pylint: disable=import-error,no-name-in-module +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException @@ -65,7 +64,7 @@ class Kibana(LoggingCheck): # Verify that the url is returning a 
valid response try: # We only care if the url connects and responds - return_code = urllib2.urlopen(url, context=ctx).getcode() + return_code = request.urlopen(url, context=ctx).getcode() except HTTPError as httperr: return httperr.reason except URLError as urlerr: diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py index 0cad19842..58a2692bd 100644 --- a/roles/openshift_health_checker/openshift_checks/ovs_version.py +++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py @@ -3,7 +3,7 @@ Ansible module for determining if an installed version of Open vSwitch is incomp currently installed version of OpenShift. """ -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck from openshift_checks.mixins import NotContainerizedMixin @@ -16,10 +16,12 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck): tags = ["health"] openshift_to_ovs_version = { - "3.7": ["2.6", "2.7", "2.8"], - "3.6": ["2.6", "2.7", "2.8"], - "3.5": ["2.6", "2.7"], - "3.4": "2.4", + (3, 4): "2.4", + (3, 5): ["2.6", "2.7"], + (3, 6): ["2.6", "2.7", "2.8"], + (3, 7): ["2.6", "2.7", "2.8"], + (3, 8): ["2.6", "2.7", "2.8"], + (3, 9): ["2.6", "2.7", "2.8"], } def is_active(self): @@ -40,16 +42,5 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck): return self.execute_module("rpm_version", args) def get_required_ovs_version(self): - """Return the correct Open vSwitch version for the current OpenShift version""" - openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag")) - - if openshift_version_tuple < (3, 5): - return self.openshift_to_ovs_version["3.4"] - - openshift_version = ".".join(str(x) for x in openshift_version_tuple) - ovs_version = self.openshift_to_ovs_version.get(openshift_version) - if ovs_version: - return self.openshift_to_ovs_version[openshift_version] - - msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}" - raise OpenShiftCheckException(msg.format(openshift_version)) + """Return the correct Open vSwitch version(s) for the current OpenShift version.""" + return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version) diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index f3a628e28..28aee8b35 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -1,8 +1,6 @@ """Check that available RPM packages match the required versions.""" -import re - -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck from openshift_checks.mixins import NotContainerizedMixin @@ -18,6 +16,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): (3, 5): ["2.6", "2.7"], (3, 6): ["2.6", "2.7", "2.8"], (3, 7): ["2.6", "2.7", "2.8"], + (3, 8): ["2.6", "2.7", "2.8"], + (3, 9): ["2.6", "2.7", "2.8"], } openshift_to_docker_version = { @@ -27,11 +27,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): (3, 4): "1.12", (3, 5): "1.12", (3, 6): "1.12", - } - - # map major OpenShift release versions across releases to a common major version - map_major_release_version = { - 1: 3, + (3, 7): "1.12", + (3, 8): "1.12", + (3, 9): ["1.12", "1.13"], } def is_active(self): @@ -83,48 +81,8 @@ class 
PackageVersion(NotContainerizedMixin, OpenShiftCheck): def get_required_ovs_version(self): """Return the correct Open vSwitch version(s) for the current OpenShift version.""" - openshift_version = self.get_openshift_version_tuple() - - earliest = min(self.openshift_to_ovs_version) - latest = max(self.openshift_to_ovs_version) - if openshift_version < earliest: - return self.openshift_to_ovs_version[earliest] - if openshift_version > latest: - return self.openshift_to_ovs_version[latest] - - ovs_version = self.openshift_to_ovs_version.get(openshift_version) - if not ovs_version: - msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}" - raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version))) - - return ovs_version + return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version) def get_required_docker_version(self): """Return the correct Docker version(s) for the current OpenShift version.""" - openshift_version = self.get_openshift_version_tuple() - - earliest = min(self.openshift_to_docker_version) - latest = max(self.openshift_to_docker_version) - if openshift_version < earliest: - return self.openshift_to_docker_version[earliest] - if openshift_version > latest: - return self.openshift_to_docker_version[latest] - - docker_version = self.openshift_to_docker_version.get(openshift_version) - if not docker_version: - msg = "There is no recommended version of Docker for the current version of OpenShift: {}" - raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version))) - - return docker_version - - def get_openshift_version_tuple(self): - """Return received image tag as a normalized (X, Y) minor version tuple.""" - version = self.get_var("openshift_image_tag") - comps = [int(component) for component in re.findall(r'\d+', version)] - - if len(comps) < 2: - msg = "An invalid version of OpenShift was found for this host: {}" - raise OpenShiftCheckException(msg.format(version)) - - comps[0] = self.map_major_release_version.get(comps[0], comps[0]) - return tuple(comps[0:2]) + return self.get_required_version("Docker", self.openshift_to_docker_version) diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index 9fd6e049d..d31f263dd 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -276,11 +276,40 @@ def test_registry_console_image(task_vars, expected): assert expected == DockerImageAvailability(task_vars=task_vars)._registry_console_image(tag, info) -def test_containerized_etcd(): - task_vars = dict( +@pytest.mark.parametrize("task_vars, expected", [ + ( + dict( + group_names=['oo_nodes_to_config'], + osn_ovs_image='spam/ovs', + openshift_image_tag="veggs", + ), + set([ + 'spam/ovs', 'openshift/node:veggs', 'cockpit/kubernetes:latest', + 'openshift/origin-haproxy-router:veggs', 'openshift/origin-deployer:veggs', + 'openshift/origin-docker-registry:veggs', 'openshift/origin-pod:veggs', + ]), + ), ( + dict( + group_names=['oo_masters_to_config'], + ), + set(['openshift/origin:latest']), + ), ( + dict( + group_names=['oo_etcd_to_config'], + ), + set(['registry.access.redhat.com/rhel7/etcd']), + ), ( + dict( + group_names=['oo_etcd_to_config'], + osm_etcd_image='spam/etcd', + ), + set(['spam/etcd']), + ), +]) +def test_containerized(task_vars, expected): + 
task_vars.update(dict( openshift_is_containerized=True, openshift_deployment_type="origin", - group_names=['oo_etcd_to_config'], - ) - expected = set(['registry.access.redhat.com/rhel7/etcd']) + )) + assert expected == DockerImageAvailability(task_vars=task_vars).required_images() diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py index 04a5e89c4..750d4b9e9 100644 --- a/roles/openshift_health_checker/test/kibana_test.py +++ b/roles/openshift_health_checker/test/kibana_test.py @@ -1,12 +1,10 @@ import pytest import json -try: - import urllib2 - from urllib2 import HTTPError, URLError -except ImportError: - from urllib.error import HTTPError, URLError - import urllib.request as urllib2 +# pylint can't find the package when its installed in virtualenv +from ansible.module_utils.six.moves.urllib import request # pylint: disable=import-error +# pylint: disable=import-error +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError from openshift_checks.logging.kibana import Kibana, OpenShiftCheckException @@ -202,7 +200,7 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch): if type(lib_result) is int: return _http_return(lib_result) raise lib_result - monkeypatch.setattr(urllib2, 'urlopen', urlopen) + monkeypatch.setattr(request, 'urlopen', urlopen) check = Kibana() check._get_kibana_url = lambda: 'url' diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index 0238f49d5..80c7a0541 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -1,26 +1,7 @@ import pytest -from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException - - -def test_openshift_version_not_supported(): - def execute_module(*_): - return {} - - openshift_release = '111.7.0' - - task_vars = dict( - openshift=dict(common=dict()), - openshift_release=openshift_release, - openshift_image_tag='v' + openshift_release, - openshift_deployment_type='origin', - openshift_service_type='origin' - ) - - with pytest.raises(OpenShiftCheckException) as excinfo: - OvsVersion(execute_module, task_vars).run() - - assert "no recommended version of Open vSwitch" in str(excinfo.value) +from openshift_checks.ovs_version import OvsVersion +from openshift_checks import OpenShiftCheckException def test_invalid_openshift_release_format(): diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index d2916f617..868b4bd12 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -1,6 +1,7 @@ import pytest -from openshift_checks.package_version import PackageVersion, OpenShiftCheckException +from openshift_checks.package_version import PackageVersion +from openshift_checks import OpenShiftCheckException def task_vars_for(openshift_release, deployment_type): @@ -18,7 +19,7 @@ def task_vars_for(openshift_release, deployment_type): def test_openshift_version_not_supported(): check = PackageVersion(None, task_vars_for("1.2.3", 'origin')) - check.get_openshift_version_tuple = lambda: (3, 4, 1) # won't be in the dict + check.get_major_minor_version = lambda: (3, 4, 1) # won't be in the dict with pytest.raises(OpenShiftCheckException) as excinfo: check.get_required_ovs_version() diff --git a/roles/openshift_hosted/defaults/main.yml 
b/roles/openshift_hosted/defaults/main.yml index f40085976..610de4f91 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -109,3 +109,5 @@ openshift_push_via_dns: False # NOTE: settting openshift_docker_hosted_registry_insecure may affect other roles openshift_hosted_docker_registry_insecure_default: "{{ openshift_docker_hosted_registry_insecure | default(False) }}" openshift_hosted_docker_registry_insecure: "{{ openshift_hosted_docker_registry_insecure_default }}" + +openshift_hosted_registry_storage_azure_blob_realm: core.windows.net diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index 22294e3d4..bc4d81eb7 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -43,7 +43,7 @@ - name: Update registry environment variables when pushing via dns set_fact: - openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}" + openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'REGISTRY_OPENSHIFT_SERVER_ADDR':'docker-registry.default.svc:5000'}) }}" when: openshift_push_via_dns | bool - name: Update registry proxy settings for dc/docker-registry diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml index 77f020357..fef945d51 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml @@ -1,4 +1,10 @@ --- +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX + register: mktempHosted + changed_when: False + check_mode: no + - name: Generate GlusterFS registry endpoints template: src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2" @@ -14,3 +20,10 @@ with_items: - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml" + +- name: Delete temp directory + file: + name: "{{ mktempHosted.stdout }}" + state: absent + changed_when: False + check_mode: no diff --git a/roles/openshift_hosted/tasks/storage/registry_config.j2 b/roles/openshift_hosted/tasks/storage/registry_config.j2 deleted file mode 120000 index f3e82ad4f..000000000 --- a/roles/openshift_hosted/tasks/storage/registry_config.j2 +++ /dev/null @@ -1 +0,0 @@ -../../../templates/registry_config.j2
\ No newline at end of file diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index cc3159a32..0786e2d2f 100644 --- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml @@ -102,7 +102,7 @@ objects: parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX - value: "openshift3/" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"' name: IMAGE_BASENAME value: "registry-console" diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml index 9f2e6125d..ccea54aaf 100644 --- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml @@ -102,7 +102,7 @@ objects: parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX - value: "openshift3/" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"' name: IMAGE_BASENAME value: "registry-console" diff --git a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml index f04ce06d3..15ad4e9af 100644 --- a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml @@ -102,7 +102,7 @@ objects: parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX - value: "openshift3/" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"' name: IMAGE_BASENAME value: "registry-console" diff --git a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml index c178cf432..7acefa0f0 100644 --- a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml @@ -102,7 +102,7 @@ objects: parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX - value: "openshift3/" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify component name for container image; e.g. 
for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"' name: IMAGE_BASENAME value: "registry-console" diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 index de5a8d7c2..823f012af 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 @@ -38,7 +38,8 @@ defaults timeout check 10s maxconn {{ openshift_loadbalancer_default_maxconn | default(20000) }} -listen stats :9000 +listen stats + bind :9000 mode http stats enable stats uri / diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index a192bd67e..c438236a4 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -58,6 +58,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin - `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1. - `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land. - `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect. +- `openshift_logging_kibana_env_vars`: A map of environment variables to add to the kibana deployment config (e.g. {"ELASTICSEARCH_REQUESTTIMEOUT":"30000"}) - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'. - `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'. diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml index 4a2ee64f0..6fdba6580 100644 --- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml +++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml @@ -12,6 +12,7 @@ separator: '#' content: metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}" + metadata#annotations#openshift.io/logging.data.prefix: ".operations" with_items: "{{ __logging_ops_projects.stdout.split(' ') }}" loop_control: loop_var: project diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index fbc3e3fd1..ced7397b5 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -131,13 +131,13 @@ when: not openshift_logging_install_eventrouter | default(false) | bool -# Update asset config in openshift-web-console namespace -- name: Remove Kibana route information from web console asset config +# Update console config in openshift-web-console namespace +- name: Remove Kibana route information from the web console config include_role: name: openshift_web_console - tasks_from: update_asset_config.yml + tasks_from: update_console_config.yml vars: - asset_config_edits: - - key: loggingPublicURL + console_config_edits: + - key: clusterInfo#loggingPublicURL value: "" when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 0d7f8c056..a40449bf6 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -19,7 +19,7 @@ command: > {{ 
openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt - --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test + --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test --overwrite=false check_mode: no when: - not ca_key_file.stat.exists diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index ebd2d747b..1b4bdb11f 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -71,10 +71,17 @@ - set_fact: openshift_logging_es_pvc_prefix="logging-es" when: openshift_logging_es_pvc_prefix == "" +# Using this module for setting this fact because otherwise we were getting a value of "" trying to +# use default() in the set_fact after this which caused us to not correctly evaluate +# openshift_logging_elasticsearch_storage_type +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + elasticsearch_storage_type: openshift_logging_elasticsearch_storage_type + - set_fact: - elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}" + default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir' }}" -# We don't allow scaling down of ES nodes currently - include_role: name: openshift_logging_elasticsearch vars: @@ -85,9 +92,10 @@ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" - openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" + openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}" + openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" - openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}" openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}" openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}" _es_containers: "{{ outer_item.0.containers}}" @@ -112,9 +120,9 @@ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" openshift_logging_elasticsearch_replica_count: "{{ 
openshift_logging_es_cluster_size | int }}" - openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" + openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" - openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}" with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} loop_control: @@ -133,7 +141,7 @@ when: openshift_logging_es_ops_pvc_prefix == "" - set_fact: - elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir') }}" + default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir' }}" when: - openshift_logging_use_ops | bool @@ -147,11 +155,12 @@ openshift_logging_elasticsearch_ops_deployment: true openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" - openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" + openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}" + openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}" openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" - openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" @@ -189,11 +198,11 @@ openshift_logging_elasticsearch_ops_deployment: true openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" - openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" + openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}" openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}" 
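# For ops nodes added by this scale-up sequence there is no existing deploymentconfig
# volume to inspect, so unless openshift_logging_elasticsearch_storage_type was set
# explicitly (picked up via conditional_set_fact earlier), the storage type falls back
# to default_elasticsearch_storage_type, which the set_fact above resolves roughly as:
#   'pvc'      if openshift_logging_es_ops_pvc_dynamic is true, openshift_logging_storage_kind
#              is 'nfs', or openshift_logging_es_ops_pvc_size is non-empty
#   'emptydir' otherwise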
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" - openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}" + openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" @@ -314,16 +323,16 @@ openshift_logging_install_eventrouter | default(false) | bool -# TODO: Remove when asset config is removed from master-config.yaml - include_tasks: update_master_config.yaml + when: not openshift.common.version_gte_3_9 # Update asset config in openshift-web-console namespace - name: Add Kibana route information to web console asset config include_role: name: openshift_web_console - tasks_from: update_asset_config.yml + tasks_from: update_console_config.yml vars: - asset_config_edits: - - key: loggingPublicURL + console_config_edits: + - key: clusterInfo#loggingPublicURL value: "https://{{ openshift_logging_kibana_hostname }}" when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml index bc817075d..d28d1d160 100644 --- a/roles/openshift_logging/tasks/procure_server_certs.yaml +++ b/roles/openshift_logging/tasks/procure_server_certs.yaml @@ -30,7 +30,7 @@ {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key - --signer-serial={{generated_certs_dir}}/ca.serial.txt + --signer-serial={{generated_certs_dir}}/ca.serial.txt --overwrite=false check_mode: no when: - cert_info.hostnames is defined diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml index 9182bddb2..16de6f252 100644 --- a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml +++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml @@ -1,6 +1,6 @@ --- - command: > - oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} + oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name} register: _cluster_pods - name: "Getting ES version for logging-es cluster" @@ -10,7 +10,7 @@ when: _cluster_pods.stdout_lines | count > 0 - command: > - oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} + oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name} register: _ops_cluster_pods - name: "Getting ES version for logging-es-ops cluster" diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml index d55beec86..879459cf6 100644 --- 
a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml +++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml @@ -1,91 +1,113 @@ --- -# Disable external communication for {{ _cluster_component }} -- name: Disable external communication for logging-{{ _cluster_component }} - oc_service: - state: present - name: "logging-{{ _cluster_component }}" - namespace: "{{ openshift_logging_elasticsearch_namespace }}" - selector: - component: "{{ _cluster_component }}" - provider: openshift - connection: blocked - labels: - logging-infra: 'support' - ports: - - port: 9200 - targetPort: "restapi" - when: - - full_restart_cluster | bool - ## get all pods for the cluster - command: > - oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} + oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name} register: _cluster_pods -- name: "Disable shard balancing for logging-{{ _cluster_component }} cluster" - command: > - oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }' - register: _disable_output - changed_when: "'\"acknowledged\":true' in _disable_output.stdout" +### Check for cluster state before making changes -- if its red then we don't want to continue +- name: "Checking current health for {{ _es_node }} cluster" + shell: > + oc exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health + register: _pod_status when: _cluster_pods.stdout_lines | count > 0 -# Flush ES -- name: "Flushing for logging-{{ _cluster_component }} cluster" - command: > - oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced' - register: _flush_output - changed_when: "'\"acknowledged\":true' in _flush_output.stdout" - when: +- when: + - _pod_status.stdout is defined + - (_pod_status.stdout | from_json)['status'] in ['red'] + block: + - name: Set Logging message to manually restart + run_once: true + set_stats: + data: + installer_phase_logging: + message: "Cluster logging-{{ _cluster_component }} was in a red state and will not be automatically restarted. Please see documentation regarding doing a {{ 'full' if full_restart_cluster | bool else 'rolling'}} cluster restart." + + - debug: msg="Cluster logging-{{ _cluster_component }} was in a red state and will not be automatically restarted. Please see documentation regarding doing a {{ 'full' if full_restart_cluster | bool else 'rolling'}} cluster restart." 
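# Both branches parse the output of the pod-local es_cluster_health helper as JSON and
# only consult its "status" field: 'red' skips the automated restart (handled above),
# while 'green'/'yellow' lets the block below proceed. A minimal sketch of the payload
# this assumes (only "status" is relied on here; the other fields are illustrative):
#   {"status": "yellow", "number_of_nodes": 3, "relocating_shards": 0}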
+ +- when: _pod_status.stdout is undefined or (_pod_status.stdout | from_json)['status'] in ['green', 'yellow'] + block: + # Disable external communication for {{ _cluster_component }} + - name: Disable external communication for logging-{{ _cluster_component }} + oc_service: + state: present + name: "logging-{{ _cluster_component }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + selector: + component: "{{ _cluster_component }}" + provider: openshift + connection: blocked + labels: + logging-infra: 'support' + ports: + - port: 9200 + targetPort: "restapi" + when: + - full_restart_cluster | bool + + - name: "Disable shard balancing for logging-{{ _cluster_component }} cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }' + register: _disable_output + changed_when: "'\"acknowledged\":true' in _disable_output.stdout" + when: _cluster_pods.stdout_lines | count > 0 + + # Flush ES + - name: "Flushing for logging-{{ _cluster_component }} cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced' + register: _flush_output + changed_when: "'\"acknowledged\":true' in _flush_output.stdout" + when: - _cluster_pods.stdout_lines | count > 0 - full_restart_cluster | bool -- command: > - oc get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} - register: _cluster_dcs + - command: > + oc get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} + register: _cluster_dcs -## restart all dcs for full restart -- name: "Restart ES node {{ _es_node }}" - include_tasks: restart_es_node.yml - with_items: "{{ _cluster_dcs }}" - loop_control: - loop_var: _es_node - when: + ## restart all dcs for full restart + - name: "Restart ES node {{ _es_node }}" + include_tasks: restart_es_node.yml + with_items: "{{ _cluster_dcs }}" + loop_control: + loop_var: _es_node + when: - full_restart_cluster | bool -## restart the node if it's dc is in the list of nodes to restart? -- name: "Restart ES node {{ _es_node }}" - include_tasks: restart_es_node.yml - with_items: "{{ _restart_logging_nodes }}" - loop_control: - loop_var: _es_node - when: + ## restart the node if it's dc is in the list of nodes to restart? 
+ - name: "Restart ES node {{ _es_node }}" + include_tasks: restart_es_node.yml + with_items: "{{ _restart_logging_nodes }}" + loop_control: + loop_var: _es_node + when: - not full_restart_cluster | bool - _es_node in _cluster_dcs.stdout -## we may need a new first pod to run against -- fetch them all again -- command: > - oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name} - register: _cluster_pods + ## we may need a new first pod to run against -- fetch them all again + - command: > + oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name} + register: _cluster_pods -- name: "Enable shard balancing for logging-{{ _cluster_component }} cluster" - command: > - oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }' - register: _enable_output - changed_when: "'\"acknowledged\":true' in _enable_output.stdout" + - name: "Enable shard balancing for logging-{{ _cluster_component }} cluster" + command: > + oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }' + register: _enable_output + changed_when: "'\"acknowledged\":true' in _enable_output.stdout" -# Reenable external communication for {{ _cluster_component }} -- name: Reenable external communication for logging-{{ _cluster_component }} - oc_service: - state: present - name: "logging-{{ _cluster_component }}" - namespace: "{{ openshift_logging_elasticsearch_namespace }}" - selector: - component: "{{ _cluster_component }}" - provider: openshift - labels: - logging-infra: 'support' - ports: + # Reenable external communication for {{ _cluster_component }} + - name: Reenable external communication for logging-{{ _cluster_component }} + oc_service: + state: present + name: "logging-{{ _cluster_component }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + selector: + component: "{{ _cluster_component }}" + provider: openshift + labels: + logging-infra: 'support' + ports: - port: 9200 targetPort: "restapi" - when: + when: - full_restart_cluster | bool diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml index 6d0df40c8..fe15e40fd 100644 --- a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml +++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml @@ -26,12 +26,12 @@ - name: "Waiting for ES to be ready for {{ _es_node }}" shell: > - oc exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- {{ __es_local_curl }} https://localhost:9200/_cat/health | cut -d' ' -f4 + oc exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health with_items: "{{ _pods.stdout.split(' ') }}" loop_control: loop_var: _pod register: _pod_status - until: _pod_status.stdout in ['green', 'yellow'] + until: (_pod_status.stdout | from_json)['status'] in ['green', 'yellow'] retries: 60 delay: 5 changed_when: false diff --git 
a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index 899193838..b69cbacae 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -18,6 +18,9 @@ openshift_logging_kibana_es_port: 9200 openshift_logging_kibana_replicas: 1 openshift_logging_kibana_edge_term_policy: Redirect +# map of env. var to add to the kibana deploymentconfig +openshift_logging_kibana_env_vars: {} + # this is used to determine if this is an operations deployment or a non-ops deployment # simply used for naming purposes openshift_logging_kibana_ops_deployment: false diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 3c3bd902e..c67235c62 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -251,6 +251,7 @@ kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}" kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}" kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}" + kibana_env_vars: "{{ openshift_logging_kibana_env_vars | default({}) }}" - name: Set Kibana DC oc_obj: diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 index 57d216373..ed05b8458 100644 --- a/roles/openshift_logging_kibana/templates/kibana.j2 +++ b/roles/openshift_logging_kibana/templates/kibana.j2 @@ -70,6 +70,10 @@ spec: resourceFieldRef: containerName: kibana resource: limits.memory +{% for key, value in kibana_env_vars.items() %} + - name: "{{ key }}" + value: "{{ value }}" +{% endfor %} volumeMounts: - name: kibana mountPath: /etc/kibana/keys diff --git a/roles/openshift_manage_node/defaults/main.yml b/roles/openshift_manage_node/defaults/main.yml index f0e728a3f..00e04b9f2 100644 --- a/roles/openshift_manage_node/defaults/main.yml +++ b/roles/openshift_manage_node/defaults/main.yml @@ -4,3 +4,6 @@ openshift_manage_node_is_master: False # Default is to be schedulable except for master nodes. 
l_openshift_manage_schedulable: "{{ openshift_schedulable | default(not openshift_manage_node_is_master) }}" + +openshift_master_node_labels: + node-role.kubernetes.io/master: 'true' diff --git a/roles/openshift_manage_node/tasks/config.yml b/roles/openshift_manage_node/tasks/config.yml new file mode 100644 index 000000000..4f00351b5 --- /dev/null +++ b/roles/openshift_manage_node/tasks/config.yml @@ -0,0 +1,27 @@ +--- +- name: Set node schedulability + oc_adm_manage_node: + node: "{{ openshift.node.nodename | lower }}" + schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}" + retries: 10 + delay: 5 + register: node_schedulable + until: node_schedulable is succeeded + when: "'nodename' in openshift.node" + delegate_to: "{{ openshift_master_host }}" + +- name: Label nodes + oc_label: + name: "{{ openshift.node.nodename }}" + kind: node + state: add + labels: "{{ l_all_labels | lib_utils_oo_dict_to_list_of_dict }}" + namespace: default + when: + - "'nodename' in openshift.node" + - l_all_labels != {} + delegate_to: "{{ openshift_master_host }}" + vars: + l_node_labels: "{{ openshift_node_labels | default({}) }}" + l_master_labels: "{{ ('oo_masters_to_config' in group_names) | ternary(openshift_master_node_labels, {}) }}" + l_all_labels: "{{ l_node_labels | combine(l_master_labels) }}" diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 9251d380b..154e2b45f 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -34,25 +34,4 @@ when: "'nodename' in openshift.node" delegate_to: "{{ openshift_master_host }}" -- name: Set node schedulability - oc_adm_manage_node: - node: "{{ openshift.node.nodename | lower }}" - schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}" - retries: 10 - delay: 5 - register: node_schedulable - until: node_schedulable is succeeded - when: "'nodename' in openshift.node" - delegate_to: "{{ openshift_master_host }}" - -- name: Label nodes - oc_label: - name: "{{ openshift.node.nodename }}" - kind: node - state: add - labels: "{{ openshift_node_labels | lib_utils_oo_dict_to_list_of_dict }}" - namespace: default - when: - - "'nodename' in openshift.node" - - openshift_node_labels | default({}) != {} - delegate_to: "{{ openshift_master_host }}" +- include_tasks: config.yml diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index b5e234b7f..57bc97e3e 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -15,6 +15,8 @@ openshift_management_pod_rollout_retries: 30 # # Choose 'miq-template' for a podified database install # Choose 'miq-template-ext-db' for an external database install +# TODO: Swap this var declaration once CFME is fully supported +#openshift_management_app_template: "{{ 'cfme-template' if openshift_deployment_type == 'openshift-enterprise' else 'miq-template' }}" openshift_management_app_template: miq-template # If you are using the miq-template-ext-db template then you must add # the required database parameters to the diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml index c3bc1d20c..48d1d4e26 100644 --- a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml +++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml @@ -9,7 +9,7 @@ 
spec: spec: containers: - name: postgresql - image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest command: - "/opt/rh/cfme-container-scripts/backup_db" env: diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml index 8b23f8a33..7fd4fc2e1 100644 --- a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml +++ b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: postgresql - image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest + image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest command: - "/opt/rh/cfme-container-scripts/restore_db" env: diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml index 4a04f3372..9866c29c3 100644 --- a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml +++ b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml @@ -31,6 +31,7 @@ objects: name: "${NAME}-secrets" stringData: pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 v2-key: "${V2_KEY}" - apiVersion: v1 @@ -90,15 +91,15 @@ objects: - name: cloudforms image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" livenessProbe: - tcpSocket: - port: 80 + exec: + command: + - pidof + - MIQ Server initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: - httpGet: - path: "/" + tcpSocket: port: 80 - scheme: HTTP initialDelaySeconds: 200 timeoutSeconds: 3 ports: @@ -126,6 +127,11 @@ objects: secretKeyRef: name: "${NAME}-secrets" key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password - name: ANSIBLE_ADMIN_PASSWORD valueFrom: secretKeyRef: @@ -433,18 +439,173 @@ objects: <VirtualHost *:80> KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + ProxyPreserveHost on - ProxyPass /ws/ ws://${NAME}/ws/ - ProxyPassReverse /ws/ ws://${NAME}/ws/ - ProxyPass / http://${NAME}/ + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must by served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. 
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + 
AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN - apiVersion: v1 kind: ConfigMap metadata: name: "${HTTPD_SERVICE_NAME}-auth-configs" data: auth-type: internal + auth-kerberos-realms: undefined auth-configuration.conf: | # External Authentication Configuration File # @@ -464,6 +625,20 @@ objects: selector: name: httpd - apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 kind: DeploymentConfig metadata: name: "${HTTPD_SERVICE_NAME}" @@ -497,6 +672,9 @@ objects: image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" ports: - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP livenessProbe: exec: command: @@ -526,6 +704,11 @@ objects: configMapKeyRef: name: "${HTTPD_SERVICE_NAME}-auth-configs" key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms lifecycle: postStart: exec: @@ -581,6 +764,11 @@ parameters: displayName: Application Database Region description: Database region that will be used for application. value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm - name: ANSIBLE_DATABASE_NAME displayName: Ansible PostgreSQL database name required: true @@ -678,7 +866,7 @@ parameters: - name: MEMCACHED_IMG_NAME displayName: Memcached Image Name description: This is the Memcached image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached - name: MEMCACHED_IMG_TAG displayName: Memcached Image Tag description: This is the Memcached image tag/version requested to deploy. 
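The image parameters above (and the matching HTTPD and ANSIBLE ones below) can be overridden when the template is processed rather than by editing the template defaults; a minimal sketch as an Ansible task, where the task name and registry host are illustrative only:

- name: Process the CloudForms ext-db template with alternate image names
  command: >
    oc process -f cfme-template-ext-db.yaml
    -p MEMCACHED_IMG_NAME=registry.example.com/cloudforms46/cfme-openshift-memcached
    -p FRONTEND_APPLICATION_IMG_NAME=registry.example.com/cloudforms46/cfme-openshift-app-ui
  # registry.example.com is a placeholder, not a value taken from this patch
  register: cfme_processed_template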
@@ -686,11 +874,11 @@ parameters: - name: FRONTEND_APPLICATION_IMG_NAME displayName: Frontend Application Image Name description: This is the Frontend Application image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui - name: BACKEND_APPLICATION_IMG_NAME displayName: Backend Application Image Name description: This is the Backend Application image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app - name: FRONTEND_APPLICATION_IMG_TAG displayName: Front end Application Image Tag description: This is the CloudForms Frontend Application image tag/version requested to deploy. @@ -702,7 +890,7 @@ parameters: - name: ANSIBLE_IMG_NAME displayName: Ansible Image Name description: This is the Ansible image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible - name: ANSIBLE_IMG_TAG displayName: Ansible Image Tag description: This is the Ansible image tag/version requested to deploy. @@ -730,10 +918,15 @@ parameters: displayName: Apache httpd Service Name description: The name of the OpenShift Service exposed for the httpd container. value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api - name: HTTPD_IMG_NAME displayName: Apache httpd Image Name description: This is the httpd image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd - name: HTTPD_IMG_TAG displayName: Apache httpd Image Tag description: This is the httpd image tag/version requested to deploy. 
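Both CloudForms templates now wire the new APPLICATION_ADMIN_PASSWORD parameter into the ${NAME}-secrets object; sketched out, the stringData rendered by the template looks roughly like this (angle-bracket values stand in for the other template parameters):

stringData:
  pg-password: "<DATABASE_PASSWORD>"
  admin-password: smartvm   # default of the new APPLICATION_ADMIN_PASSWORD parameter
  database-url: postgresql://<DATABASE_USER>:<DATABASE_PASSWORD>@<DATABASE_SERVICE_NAME>/<DATABASE_NAME>?encoding=utf8&pool=5&wait_timeout=5
  v2-key: "<V2_KEY>"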
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml index d7c9f5af7..5c757b6c2 100644 --- a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml +++ b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml @@ -31,6 +31,7 @@ objects: name: "${NAME}-secrets" stringData: pg-password: "${DATABASE_PASSWORD}" + admin-password: "${APPLICATION_ADMIN_PASSWORD}" database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5 v2-key: "${V2_KEY}" - apiVersion: v1 @@ -128,18 +129,173 @@ objects: <VirtualHost *:80> KeepAlive on + # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP + ServerName https://%{REQUEST_HOST} + ProxyPreserveHost on - ProxyPass /ws/ ws://${NAME}/ws/ - ProxyPassReverse /ws/ ws://${NAME}/ws/ - ProxyPass / http://${NAME}/ + + RewriteCond %{REQUEST_URI} ^/ws [NC] + RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC] + RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC] + RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L] + + # For httpd, some ErrorDocuments must by served by the httpd pod + RewriteCond %{REQUEST_URI} !^/proxy_pages + + # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod + RewriteCond %{REQUEST_URI} !^/saml2 + RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L] ProxyPassReverse / http://${NAME}/ + + # Ensures httpd stdout/stderr are seen by docker logs. + ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log" + CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common </VirtualHost> + authentication.conf: | + # Load appropriate authentication configuration files + # + Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth" + configuration-internal-auth: | + # Internal authentication + # + configuration-external-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/http.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-active-directory-auth: | + Include "conf.d/external-auth-load-modules-conf" + + <Location /dashboard/kerberos_authenticate> + AuthType Kerberos + AuthName "Kerberos Login" + KrbMethodNegotiate On + KrbMethodK5Passwd Off + KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS} + Krb5KeyTab /etc/krb5.keytab + KrbServiceName Any + Require pam-account httpd-auth + + ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js + </Location> + + Include "conf.d/external-auth-login-form-conf" + Include "conf.d/external-auth-application-api-conf" + Include "conf.d/external-auth-lookup-user-details-conf" + Include "conf.d/external-auth-remote-user-conf" + configuration-saml-auth: | + LoadModule auth_mellon_module modules/mod_auth_mellon.so + + <Location /> + MellonEnable "info" + + MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml" + + MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key" + MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert" + MellonSPMetadataFile 
"/etc/httpd/saml2/sp-metadata.xml" + + MellonVariable "sp-cookie" + MellonSecureCookie On + MellonCookiePath "/" + + MellonIdP "IDP" + + MellonEndpointPath "/saml2" + + MellonUser username + MellonMergeEnvVars On + + MellonSetEnvNoPrefix "REMOTE_USER" username + MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email + MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname + MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname + MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname + MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups + </Location> + + <Location /saml_login> + AuthType "Mellon" + MellonEnable "auth" + Require valid-user + </Location> + + Include "conf.d/external-auth-remote-user-conf" + external-auth-load-modules-conf: | + LoadModule authnz_pam_module modules/mod_authnz_pam.so + LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so + LoadModule lookup_identity_module modules/mod_lookup_identity.so + LoadModule auth_kerb_module modules/mod_auth_kerb.so + external-auth-login-form-conf: | + <Location /dashboard/external_authenticate> + InterceptFormPAMService httpd-auth + InterceptFormLogin user_name + InterceptFormPassword user_password + InterceptFormLoginSkip admin + InterceptFormClearRemoteUserForSkipped on + </Location> + external-auth-application-api-conf: | + <LocationMatch ^/api> + SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in + SetEnvIf X-Auth-Token '^.+$' let_api_token_in + SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in + + AuthType Basic + AuthName "External Authentication (httpd) for API" + AuthBasicProvider PAM + + AuthPAMService httpd-auth + Require valid-user + Order Allow,Deny + Allow from env=let_admin_in + Allow from env=let_api_token_in + Allow from env=let_sys_token_in + Satisfy Any + </LocationMatch> + external-auth-lookup-user-details-conf: | + <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api> + LookupUserAttr mail REMOTE_USER_EMAIL + LookupUserAttr givenname REMOTE_USER_FIRSTNAME + LookupUserAttr sn REMOTE_USER_LASTNAME + LookupUserAttr displayname REMOTE_USER_FULLNAME + LookupUserAttr domainname REMOTE_USER_DOMAIN + + LookupUserGroups REMOTE_USER_GROUPS ":" + LookupDbusTimeout 5000 + </LocationMatch> + external-auth-remote-user-conf: | + RequestHeader unset X_REMOTE_USER + + RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER + RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR + RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL + RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME + RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME + RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME + RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS + RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN - apiVersion: v1 kind: ConfigMap metadata: name: "${HTTPD_SERVICE_NAME}-auth-configs" data: auth-type: internal + auth-kerberos-realms: undefined auth-configuration.conf: | # External Authentication Configuration File # @@ -203,15 +359,15 @@ objects: - name: cloudforms image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}" livenessProbe: - tcpSocket: - port: 80 + exec: + command: + - pidof + - MIQ Server initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: - httpGet: - path: "/" + tcpSocket: port: 80 - scheme: HTTP 
initialDelaySeconds: 200 timeoutSeconds: 3 ports: @@ -239,6 +395,11 @@ objects: secretKeyRef: name: "${NAME}-secrets" key: v2-key + - name: APPLICATION_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: admin-password - name: ANSIBLE_ADMIN_PASSWORD valueFrom: secretKeyRef: @@ -611,6 +772,20 @@ objects: selector: name: httpd - apiVersion: v1 + kind: Service + metadata: + name: "${HTTPD_DBUS_API_SERVICE_NAME}" + annotations: + description: Exposes the httpd server dbus api + service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]' + spec: + ports: + - name: http-dbus-api + port: 8080 + targetPort: 8080 + selector: + name: httpd +- apiVersion: v1 kind: DeploymentConfig metadata: name: "${HTTPD_SERVICE_NAME}" @@ -644,6 +819,9 @@ objects: image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}" ports: - containerPort: 80 + protocol: TCP + - containerPort: 8080 + protocol: TCP livenessProbe: exec: command: @@ -673,6 +851,11 @@ objects: configMapKeyRef: name: "${HTTPD_SERVICE_NAME}-auth-configs" key: auth-type + - name: HTTPD_AUTH_KERBEROS_REALMS + valueFrom: + configMapKeyRef: + name: "${HTTPD_SERVICE_NAME}-auth-configs" + key: auth-kerberos-realms lifecycle: postStart: exec: @@ -718,6 +901,11 @@ parameters: displayName: Application Database Region description: Database region that will be used for application. value: '0' +- name: APPLICATION_ADMIN_PASSWORD + displayName: Application Admin Password + required: true + description: Admin password that will be set on the application. + value: smartvm - name: ANSIBLE_DATABASE_NAME displayName: Ansible PostgreSQL database name required: true @@ -842,7 +1030,7 @@ parameters: - name: POSTGRESQL_IMG_NAME displayName: PostgreSQL Image Name description: This is the PostgreSQL image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql - name: POSTGRESQL_IMG_TAG displayName: PostgreSQL Image Tag description: This is the PostgreSQL image tag/version requested to deploy. @@ -850,7 +1038,7 @@ parameters: - name: MEMCACHED_IMG_NAME displayName: Memcached Image Name description: This is the Memcached image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached - name: MEMCACHED_IMG_TAG displayName: Memcached Image Tag description: This is the Memcached image tag/version requested to deploy. @@ -858,11 +1046,11 @@ parameters: - name: FRONTEND_APPLICATION_IMG_NAME displayName: Frontend Application Image Name description: This is the Frontend Application image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui - name: BACKEND_APPLICATION_IMG_NAME displayName: Backend Application Image Name description: This is the Backend Application image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app - name: FRONTEND_APPLICATION_IMG_TAG displayName: Front end Application Image Tag description: This is the CloudForms Frontend Application image tag/version requested to deploy. 
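Taken together, the APPLICATION_ADMIN_PASSWORD pieces in this template change thread the new credential from a template parameter into the pod environment. A condensed sketch of the wiring only (not the full template; all names are as they appear in cfme-template.yaml above):

```
parameters:
- name: APPLICATION_ADMIN_PASSWORD
  displayName: Application Admin Password
  required: true
  value: smartvm                      # default; override when processing the template
objects:
- apiVersion: v1
  kind: Secret
  metadata:
    name: "${NAME}-secrets"
  stringData:
    admin-password: "${APPLICATION_ADMIN_PASSWORD}"
# ...and in the cloudforms container spec above:
#   env:
#   - name: APPLICATION_ADMIN_PASSWORD
#     valueFrom:
#       secretKeyRef:
#         name: "${NAME}-secrets"
#         key: admin-password
```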
@@ -874,7 +1062,7 @@ parameters: - name: ANSIBLE_IMG_NAME displayName: Ansible Image Name description: This is the Ansible image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible - name: ANSIBLE_IMG_TAG displayName: Ansible Image Tag description: This is the Ansible image tag/version requested to deploy. @@ -907,10 +1095,15 @@ parameters: displayName: Apache httpd Service Name description: The name of the OpenShift Service exposed for the httpd container. value: httpd +- name: HTTPD_DBUS_API_SERVICE_NAME + required: true + displayName: Apache httpd DBus API Service Name + description: The name of httpd dbus api service. + value: httpd-dbus-api - name: HTTPD_IMG_NAME displayName: Apache httpd Image Name description: This is the httpd image name requested to deploy. - value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd + value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd - name: HTTPD_IMG_TAG displayName: Apache httpd Image Tag description: This is the httpd image tag/version requested to deploy. diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml index e45ea8d43..80318fec0 100644 --- a/roles/openshift_management/tasks/accounts.yml +++ b/roles/openshift_management/tasks/accounts.yml @@ -5,14 +5,14 @@ oc_serviceaccount: namespace: "{{ openshift_management_project }}" state: present - name: "{{ openshift_management_flavor_short }}{{ item.name }}" + name: "{{ __openshift_management_flavor_short }}{{ item.name }}" with_items: - "{{ __openshift_system_account_sccs }}" - name: Ensure the CFME system accounts have all the required SCCs oc_adm_policy_user: namespace: "{{ openshift_management_project }}" - user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}" resource_kind: scc resource_name: "{{ item.resource_name }}" with_items: @@ -21,7 +21,7 @@ - name: Ensure the CFME system accounts have the required roles oc_adm_policy_user: namespace: "{{ openshift_management_project }}" - user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}" resource_kind: role resource_name: "{{ item.resource_name }}" with_items: diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index c4b204b98..5209eba56 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -71,15 +71,15 @@ # CREATE APP - name: Note the correct ext-db template name set_fact: - openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db" + openshift_management_template_name: "{{ __openshift_management_flavor }}-ext-db" when: - - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + - __openshift_management_use_ext_db - name: Note the correct podified db template name set_fact: - openshift_management_template_name: "{{ openshift_management_flavor }}" + openshift_management_template_name: "{{ __openshift_management_flavor }}" when: - - 
openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - name: Ensure the Management App is created oc_process: @@ -89,7 +89,7 @@ params: "{{ openshift_management_template_parameters }}" - name: Wait for the app to come up. May take several minutes, 30s check intervals, {{ openshift_management_pod_rollout_retries }} retries - command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" + command: "oc logs {{ __openshift_management_flavor }}-0 -n {{ openshift_management_project }}" register: app_seeding_logs until: app_seeding_logs.stdout.find('Server starting complete') != -1 delay: 30 diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml index d1b9a8d5c..1f8cac6c6 100644 --- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -12,7 +12,7 @@ when: - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Note the DB PV Size from Template Parameters set_fact: @@ -31,7 +31,7 @@ namespace: "{{ openshift_management_project }}" state: list kind: pv - name: "{{ openshift_management_flavor_short }}-app" + name: "{{ __openshift_management_flavor_short }}-app" register: miq_app_pv_check - name: Check if the Management DB PV has been created @@ -39,15 +39,15 @@ namespace: "{{ openshift_management_project }}" state: list kind: pv - name: "{{ openshift_management_flavor_short }}-db" + name: "{{ __openshift_management_flavor_short }}-db" register: miq_db_pv_check when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - name: Ensure the Management App PV is created oc_process: namespace: "{{ openshift_management_project }}" - template_name: "{{ openshift_management_flavor }}-app-pv" + template_name: "{{ __openshift_management_flavor }}-app-pv" create: True params: PV_SIZE: "{{ openshift_management_app_pv_size }}" @@ -58,12 +58,12 @@ - name: Ensure the Management DB PV is created oc_process: namespace: "{{ openshift_management_project }}" - template_name: "{{ openshift_management_flavor }}-db-pv" + template_name: "{{ __openshift_management_flavor }}-db-pv" create: True params: PV_SIZE: "{{ openshift_management_db_pv_size }}" BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}" NFS_HOST: "{{ openshift_management_nfs_server }}" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - miq_db_pv_check.results.results == [{}] diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 9e3a4d43a..4a00efb1d 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -17,8 +17,8 @@ tasks_from: create_export vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - l_nfs_export_config: "{{ openshift_management_flavor_short }}" - l_nfs_export_name: "{{ openshift_management_flavor_short }}-app" + l_nfs_export_config: "{{ __openshift_management_flavor_short }}" + l_nfs_export_name: "{{ __openshift_management_flavor_short }}-app" l_nfs_options: "*(rw,no_root_squash,no_wdelay)" - name: Create the DB export @@ -27,10 +27,10 @@ 
tasks_from: create_export vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - l_nfs_export_config: "{{ openshift_management_flavor_short }}" - l_nfs_export_name: "{{ openshift_management_flavor_short }}-db" + l_nfs_export_config: "{{ __openshift_management_flavor_short }}" + l_nfs_export_name: "{{ __openshift_management_flavor_short }}-db" l_nfs_options: "*(rw,no_root_squash,no_wdelay)" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db delegate_to: "{{ openshift_management_nfs_server }}" diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml index 9f97cdcb9..f40af7349 100644 --- a/roles/openshift_management/tasks/template.yml +++ b/roles/openshift_management/tasks/template.yml @@ -13,59 +13,59 @@ ###################################################################### # STANDARD PODIFIED DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Check if the Management Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}" + name: "{{ __openshift_management_flavor }}" register: miq_server_check - when: miq_server_check.results.results == [{}] block: - name: Copy over Management Server template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-template.yaml" dest: "{{ template_dir }}/" - name: Ensure Management Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}" + name: "{{ __openshift_management_flavor }}" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template.yaml" ###################################################################### # EXTERNAL DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] +- when: __openshift_management_use_ext_db block: - name: Check if the Management Ext-DB Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-ext-db" + name: "{{ __openshift_management_flavor }}-ext-db" register: miq_ext_db_server_check - when: miq_ext_db_server_check.results.results == [{}] block: - name: Copy over Management Ext-DB Server template copy: - src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{__openshift_management_flavor_short}}-template-ext-db.yaml" dest: "{{ template_dir }}/" - name: Ensure Management Ext-DB Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-ext-db" + name: "{{ __openshift_management_flavor }}-ext-db" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template-ext-db.yaml" # End app template creation. 
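The `__openshift_management_use_ext_db` guards used throughout these template tasks replace the repeated `openshift_management_app_template in [...]` membership checks; the derivation of the double-underscore facts is in roles/openshift_management/vars/main.yml further down in this diff. A worked sketch of how they evaluate for the two template families (values follow directly from that vars file):

```
# openshift_management_app_template: cfme-template-ext-db
#   split('-')[0] -> 'cfme'
#   __openshift_management_flavor:       cloudforms
#   __openshift_management_flavor_short: cfme
#   __openshift_management_use_ext_db:   true    # ext-db template block runs
#
# openshift_management_app_template: miq-template
#   split('-')[0] -> 'miq'
#   __openshift_management_flavor:       manageiq
#   __openshift_management_flavor_short: miq
#   __openshift_management_use_ext_db:   false   # podified DB template block runs
```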
###################################################################### @@ -79,50 +79,50 @@ namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-app-pv" + name: "{{ __openshift_management_flavor }}-app-pv" register: miq_app_pv_check - when: miq_app_pv_check.results.results == [{}] block: - name: Copy over Management App PV template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml" dest: "{{ template_dir }}/" - name: Ensure Management App PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-app-pv" + name: "{{ __openshift_management_flavor }}-app-pv" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml" #--------------------------------------------------------------------- # Required for database if the installation is fully podified -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Check if the Management DB PV template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-db-pv" + name: "{{ __openshift_management_flavor }}-db-pv" register: miq_db_pv_check - when: miq_db_pv_check.results.results == [{}] block: - name: Copy over Management DB PV template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" dest: "{{ template_dir }}/" - name: Ensure Management DB PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-db-pv" + name: "{{ __openshift_management_flavor }}-db-pv" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml index b22f36a4f..2dc895190 100644 --- a/roles/openshift_management/tasks/validate.yml +++ b/roles/openshift_management/tasks/validate.yml @@ -100,4 +100,4 @@ 'openshift_management_template_parameters'" with_items: "{{ __openshift_management_required_db_conn_params }}" when: - - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + - __openshift_management_use_ext_db diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml index da3ad0af7..d7b18df3a 100644 --- a/roles/openshift_management/vars/main.yml +++ b/roles/openshift_management/vars/main.yml @@ -30,14 +30,18 @@ __openshift_management_db_parameters: - DATABASE_PORT - DATABASE_NAME -# # Commented out until we can support both CFME and MIQ -# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}" -#openshift_management_flavor: cloudforms -openshift_management_flavor: manageiq -# TODO: Make this conditional as well 
based on the prior variable -# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}" -# openshift_management_flavor_short: cfme -openshift_management_flavor_short: miq +__openshift_management_flavors: + miq: + short: miq + long: manageiq + cfme: + short: cfme + long: cloudforms + +__openshift_management_flavor: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['long'] }}" +__openshift_management_flavor_short: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['short'] }}" + +__openshift_management_use_ext_db: "{{ true if 'ext-db' in openshift_management_app_template else false }}" ###################################################################### # ACCOUNTING diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml index 7870f43e2..4564f33dd 100644 --- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml @@ -8,8 +8,10 @@ # TODO: If the sdn package isn't already installed this will install it, we # should fix that -- name: Upgrade master packages - package: name={{ master_pkgs | join(',') }} state=present +- name: Upgrade master packages - yum + command: + yum install -y {{ master_pkgs | join(' ') }} \ + {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }} vars: master_pkgs: - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}" @@ -17,6 +19,21 @@ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}" - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}" - - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" register: result until: result is succeeded + when: ansible_pkg_mgr == 'yum' + +- name: Upgrade master packages - dnf + dnf: + name: "{{ master_pkgs | join(',') }}" + state: present + vars: + master_pkgs: + - "{{ openshift_service_type }}{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}" + register: result + until: result is succeeded + when: ansible_pkg_mgr == 'dnf' diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 14023ea73..4c9ab1864 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -5,6 +5,7 @@ admissionConfig: apiLevels: - v1 apiVersion: v1 +{% if not openshift.common.version_gte_3_9 %} assetConfig: logoutURL: "{{ openshift.master.logout_url | default('') }}" masterPublicURL: {{ openshift.master.public_api_url }} @@ -41,6 +42,8 @@ assetConfig: - {{ cipher_suite }} {% endfor %} {% endif %} +# assetconfig end +{% endif %} {% if openshift.master.audit_config | default(none) is not none %} auditConfig:{{ openshift.master.audit_config | lib_utils_to_padded_yaml(level=1) }} {% endif %} diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 8da74430f..293d8f451 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ 
b/roles/openshift_metrics/defaults/main.yaml @@ -54,7 +54,7 @@ openshift_metrics_master_url: https://kubernetes.default.svc openshift_metrics_node_id: nodename openshift_metrics_project: openshift-infra -openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}" +openshift_metrics_cassandra_pvc_prefix: metrics-cassandra openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" openshift_metrics_hawkular_user_write_access: False diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 0866fe0d2..6b6c21d71 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -67,17 +67,17 @@ with_items: "{{ hawkular_agent_object_defs.results }}" when: openshift_metrics_install_hawkular_agent | bool -# TODO: Remove when asset config is removed from master-config.yaml - include_tasks: update_master_config.yaml + when: not openshift.common.version_gte_3_9 # Update asset config in openshift-web-console namespace - name: Add metrics route information to web console asset config include_role: name: openshift_web_console - tasks_from: update_asset_config.yml + tasks_from: update_console_config.yml vars: - asset_config_edits: - - key: metricsPublicURL + console_config_edits: + - key: clusterInfo#metricsPublicURL value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml index 8ccfb7192..30fdde94c 100644 --- a/roles/openshift_metrics/tasks/oc_apply.yaml +++ b/roles/openshift_metrics/tasks/oc_apply.yaml @@ -16,7 +16,7 @@ apply -f {{ file_name }} -n {{namespace}} register: generation_apply - failed_when: "'error' in generation_apply.stderr" + failed_when: "'error' in generation_apply.stderr or (generation_apply.rc | int != 0)" changed_when: no - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} @@ -28,5 +28,5 @@ register: version_changed vars: init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}" - failed_when: "'error' in version_changed.stderr" + failed_when: "'error' in version_changed.stderr or version_changed.rc | int != 0" changed_when: version_changed.stdout | int > init_version | int diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 610c7b4e5..1664e9975 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -19,13 +19,13 @@ clusterrolebinding/hawkular-metrics changed_when: delete_metrics.stdout != 'No resources found' -# Update asset config in openshift-web-console namespace -- name: Remove metrics route information from web console asset config +# Update the web config in openshift-web-console namespace +- name: Remove metrics route information from the web console config include_role: name: openshift_web_console - tasks_from: update_asset_config.yml + tasks_from: update_console_config.yml vars: - asset_config_edits: - - key: metricsPublicURL + console_config_edits: + - key: clusterInfo#metricsPublicURL value: "" when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 
c1fab4382..9f887891b 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -48,6 +48,12 @@ openshift_node_kubelet_args_dict: cloud-config: - "{{ openshift_config_base ~ '/cloudprovider/gce.conf' }}" node-labels: "{{ l_node_kubelet_node_labels }}" + azure: + cloud-provider: + - azure + cloud-config: + - "{{ openshift_config_base ~ '/cloudprovider/azure.conf' }}" + node-labels: "{{ l_node_kubelet_node_labels }}" undefined: node-labels: "{{ l_node_kubelet_node_labels }}" @@ -71,6 +77,19 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) } l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" +openshift_node_syscon_auth_mounts_l: +- type: bind + source: "{{ oreg_auth_credentials_path }}" + destination: "/root/.docker" + options: + - ro + - bind + +# If we need to add new mounts in the future, or the user wants to mount data. +# This should be in the same format as auth_mounts_l above. +openshift_node_syscon_add_mounts_l: [] + + openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}" openshift_node_image_dict: @@ -118,6 +137,7 @@ default_r_openshift_node_image_prep_packages: - yum-utils # gluster - glusterfs-fuse +- device-mapper-multipath # nfs - nfs-utils - flannel diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 754ecacaf..f56f24e12 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -14,33 +14,11 @@ #### Disable SWAP ##### # https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory -- name: Check for swap usage - command: grep "^[^#].*swap" /etc/fstab - # grep: match any lines which don't begin with '#' and contain 'swap' - changed_when: false - failed_when: false - register: swap_result - -- when: - - swap_result.stdout_lines | length > 0 - - openshift_disable_swap | default(true) | bool - block: - - name: Disable swap - command: swapoff --all - - - name: Remove swap entries from /etc/fstab - replace: - dest: /etc/fstab - regexp: '(^[^#].*swap.*)' - replace: '# \1' - backup: yes - - - name: Add notice about disabling swap - lineinfile: - dest: /etc/fstab - line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' - state: present -#### End Disable Swap Block #### +# swapoff is a custom module in lib_utils that comments out swap entries in +# /etc/fstab and runs swapoff -a, if necessary. +- name: Disable swap + swapoff: {} + when: openshift_disable_swap | default(true) | bool - name: include node installer include_tasks: install.yml diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 06b879050..008f209d7 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -14,4 +14,23 @@ - "DNS_DOMAIN={{ openshift.common.dns_domain }}" - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service" - "MASTER_SERVICE={{ openshift_service_type }}.service" + - 'ADDTL_MOUNTS={{ l_node_syscon_add_mounts2 }}' state: latest + vars: + # We need to evaluate some variables here to ensure + # l_bind_docker_reg_auth is evaluated after registry_auth.yml has been + # processed. + + # Determine if we want to include auth credentials mount. 
+ l_node_syscon_auth_mounts_l: "{{ l_bind_docker_reg_auth | ternary(openshift_node_syscon_auth_mounts_l,[]) }}" + + # Join any user-provided mounts and auth_mounts into a combined list. + l_node_syscon_add_mounts_l: "{{ openshift_node_syscon_add_mounts_l | union(l_node_syscon_auth_mounts_l) }}" + + # We must prepend a ',' here to ensure the value is inserted properly into an + # existing json list in the container's config.json + # lib_utils_oo_l_of_d_to_csv is a custom filter plugin in roles/lib_utils/oo_filters.py + l_node_syscon_add_mounts: ",{{ l_node_syscon_add_mounts_l | lib_utils_oo_l_of_d_to_csv }}" + # if we have just a ',' then both mount lists were empty, we don't want to add + # anything to config.json + l_node_syscon_add_mounts2: "{{ (l_node_syscon_add_mounts != ',') | bool | ternary(l_node_syscon_add_mounts,'') }}" diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml index a8048c42f..72415f9a6 100644 --- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml +++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml @@ -1,6 +1,32 @@ --- - name: Install iSCSI storage plugin dependencies - package: name=iscsi-initiator-utils state=present + package: + name: "{{ item }}" + state: present when: not openshift_is_atomic | bool register: result until: result is succeeded + with_items: + - iscsi-initiator-utils + - device-mapper-multipath + +- name: restart services + systemd: + name: "{{ item }}" + state: started + enabled: True + with_items: + - multipathd + - rpcbind + +- name: Template multipath configuration + template: + dest: "/etc/multipath.conf" + src: multipath.conf.j2 + backup: true + when: not openshift_is_atomic | bool + +#enable multipath +- name: Enable multipath + command: "mpathconf --enable" + when: not openshift_is_atomic | bool diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml index dd9183382..15ac76f7d 100644 --- a/roles/openshift_node/tasks/upgrade/config_changes.yml +++ b/roles/openshift_node/tasks/upgrade/config_changes.yml @@ -27,28 +27,12 @@ path: "/var/lib/cni/networks/openshift-sdn/" state: absent -# Disable Swap Block (pre) -- block: - - name: Remove swap entries from /etc/fstab - replace: - dest: /etc/fstab - regexp: '(^[^#].*swap.*)' - replace: '# \1' - backup: yes - - - name: Add notice about disabling swap - lineinfile: - dest: /etc/fstab - line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' - state: present - - - name: Disable swap - command: swapoff --all - - when: - - openshift_node_upgrade_swap_result | default(False) | bool - - openshift_disable_swap | default(true) | bool -# End Disable Swap Block +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +# swapoff is a custom module in lib_utils that comments out swap entries in +# /etc/fstab and runs swapoff -a, if necessary. 
+- name: Disable swap + swapoff: {} + when: openshift_disable_swap | default(true) | bool - name: Apply 3.6 dns config changes yedit: diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml index 3ae7dc6b6..aa1a75100 100644 --- a/roles/openshift_node/tasks/upgrade_pre.yml +++ b/roles/openshift_node/tasks/upgrade_pre.yml @@ -41,16 +41,3 @@ vars: openshift_version: "{{ openshift_pkg_version | default('') }}" when: not openshift_is_containerized | bool - -# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory -- name: Check for swap usage - command: grep "^[^#].*swap" /etc/fstab - # grep: match any lines which don't begin with '#' and contain 'swap' - changed_when: false - failed_when: false - register: swap_result - -# Set this fact here so we can use it during the next play, which is serial. -- name: set_fact swap_result - set_fact: - openshift_node_upgrade_swap_result: "{{ swap_result.stdout_lines | length > 0 | bool }}" diff --git a/roles/openshift_node/templates/multipath.conf.j2 b/roles/openshift_node/templates/multipath.conf.j2 new file mode 100644 index 000000000..8a0abc2c1 --- /dev/null +++ b/roles/openshift_node/templates/multipath.conf.j2 @@ -0,0 +1,15 @@ +# LIO iSCSI +# TODO: Add env variables for tweaking +devices { + device { + vendor "LIO-ORG" + user_friendly_names "yes" + path_grouping_policy "failover" + path_selector "round-robin 0" + failback immediate + path_checker "tur" + prio "const" + no_path_retry 120 + rr_weight "uniform" + } +} diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index 777f4a449..7405cfd73 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -6,7 +6,7 @@ After=ovsdb-server.service After=ovs-vswitchd.service Wants={{ openshift_docker_service_name }}.service Documentation=https://github.com/openshift/origin -Requires=dnsmasq.service +Wants=dnsmasq.service After=dnsmasq.service {% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %} diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 5f2a94ea2..7d817463c 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -32,7 +32,7 @@ masterClientConnectionOverrides: contentType: application/vnd.kubernetes.protobuf burst: 200 qps: 100 -masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig +masterKubeConfig: system:node:{{ openshift.common.hostname | lower }}.kubeconfig {% if openshift_node_use_openshift_sdn | bool %} networkPluginName: {{ openshift_node_sdn_network_plugin_name }} {% endif %} diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index ae7b147a6..23823e3e5 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -13,7 +13,7 @@ After=ovs-vswitchd.service Wants={{ openshift_service_type }}-master.service Requires={{ openshift_service_type }}-node-dep.service After={{ openshift_service_type }}-node-dep.service -Requires=dnsmasq.service +Wants=dnsmasq.service After=dnsmasq.service [Service] diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 5f73f3bdc..13d9fd718 100644 --- 
a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -18,9 +18,9 @@ stat: path: "{{ openshift.common.config_base }}/node/{{ item }}" with_items: - - "system:node:{{ openshift.common.hostname }}.crt" - - "system:node:{{ openshift.common.hostname }}.key" - - "system:node:{{ openshift.common.hostname }}.kubeconfig" + - "system:node:{{ openshift.common.hostname | lower }}.crt" + - "system:node:{{ openshift.common.hostname | lower }}.key" + - "system:node:{{ openshift.common.hostname | lower }}.kubeconfig" - ca.crt - server.key - server.crt @@ -59,16 +59,16 @@ --certificate-authority {{ legacy_ca_certificate }} {% endfor %} --certificate-authority={{ openshift_ca_cert }} - --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }} + --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }} --groups=system:nodes --master={{ hostvars[openshift_ca_host].openshift.master.api_url }} --signer-cert={{ openshift_ca_cert }} --signer-key={{ openshift_ca_key }} --signer-serial={{ openshift_ca_serial }} - --user=system:node:{{ hostvars[item].openshift.common.hostname }} + --user=system:node:{{ hostvars[item].openshift.common.hostname | lower }} --expire-days={{ openshift_node_cert_expire_days }} args: - creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}" + creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}" with_items: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}" @@ -78,16 +78,16 @@ - name: Generate the node server certificate command: > {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert - --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt - --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key + --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt + --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.key --expire-days={{ openshift_node_cert_expire_days }} --overwrite=true - --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }} + --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.hostname | lower }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.public_hostname | lower }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }} --signer-cert={{ openshift_ca_cert }} --signer-key={{ openshift_ca_key }} --signer-serial={{ openshift_ca_serial }} args: - creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt" + creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt" with_items: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}" diff --git 
a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml index 17ad8106d..12a6d3f94 100644 --- a/roles/openshift_node_certificates/vars/main.yml +++ b/roles/openshift_node_certificates/vars/main.yml @@ -1,7 +1,7 @@ --- openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs" openshift_node_cert_dir: "{{ openshift.common.config_base }}/node" -openshift_node_cert_subdir: "node-{{ openshift.common.hostname }}" +openshift_node_cert_subdir: "node-{{ openshift.common.hostname | lower }}" openshift_node_config_dir: "{{ openshift.common.config_base }}/node" openshift_node_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}" diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 index 32c6b5838..9015c561f 100644 --- a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 +++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 @@ -1,4 +1,8 @@ +{% if docker_storage_mountpoints is defined %} +DEVS="{{ docker_storage_mountpoints }}" +{% else %} DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +{% endif %} VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}" diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 index 1bf366bdc..917347073 100644 --- a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 +++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 @@ -1,4 +1,8 @@ +{% if docker_storage_mountpoints is defined %} +DEVS="{{ docker_storage_mountpoints }}" +{% else %} DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +{% endif %} VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" STORAGE_DRIVER=overlay2 diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index 1be5d3a62..1d3173022 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -418,6 +418,10 @@ resources: protocol: tcp port_range_min: 443 port_range_max: 443 + - direction: ingress + protocol: tcp + port_range_min: 1936 + port_range_max: 1936 cns-secgrp: type: OS::Neutron::SecurityGroup @@ -523,7 +527,7 @@ resources: floating_network: if: - no_floating - - null + - '' - {{ openshift_openstack_external_network_name }} {% if openshift_openstack_provider_network_name %} attach_float_net: false @@ -589,8 +593,13 @@ resources: secgrp: - { get_resource: lb-secgrp } - { get_resource: common-secgrp } -{% if not openshift_openstack_provider_network_name %} - floating_network: {{ openshift_openstack_external_network_name }} + floating_network: + if: + - no_floating + - '' + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_provider_network_name %} + attach_float_net: false {% endif %} volume_size: {{ openshift_openstack_lb_volume_size }} {% if not openshift_openstack_provider_network_name %} @@ -655,7 +664,7 @@ resources: floating_network: if: - no_floating - - null + - '' - {{ 
openshift_openstack_external_network_name }} {% if openshift_openstack_provider_network_name %} attach_float_net: false @@ -725,7 +734,7 @@ resources: floating_network: if: - no_floating - - null + - '' - {{ openshift_openstack_external_network_name }} {% if openshift_openstack_provider_network_name %} attach_float_net: false @@ -792,8 +801,13 @@ resources: {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } -{% if not openshift_openstack_provider_network_name %} - floating_network: {{ openshift_openstack_external_network_name }} + floating_network: + if: + - no_floating + - '' + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_provider_network_name %} + attach_float_net: false {% endif %} volume_size: {{ openshift_openstack_infra_volume_size }} {% if openshift_openstack_infra_server_group_policies|length > 0 %} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 index 1e73c9e1c..9aeecfa74 100644 --- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -102,13 +102,11 @@ parameters: label: Attach-float-net description: A switch for floating network port connection -{% if not openshift_openstack_provider_network_name %} floating_network: type: string default: '' label: Floating network description: Network to allocate floating IP from -{% endif %} availability_zone: type: string @@ -263,11 +261,12 @@ resources: properties: size: { get_param: volume_size } availability_zone: { get_param: availability_zone } + metadata: + purpose: openshift_docker_storage volume_attachment: type: OS::Cinder::VolumeAttachment properties: volume_id: { get_resource: cinder_volume } instance_uuid: { get_resource: server } - mountpoint: /dev/sdb {% endif %} diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml index ef9ab7f5f..865269b7a 100644 --- a/roles/openshift_persistent_volumes/tasks/pv.yml +++ b/roles/openshift_persistent_volumes/tasks/pv.yml @@ -13,5 +13,5 @@ --config={{ mktemp.stdout }}/admin.kubeconfig register: pv_create_output when: persistent_volumes | length > 0 - failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) + failed_when: "('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) and pv_create_output.rc != 0" changed_when: ('created' in pv_create_output.stdout) diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml index 2c5519192..6c12d128c 100644 --- a/roles/openshift_persistent_volumes/tasks/pvc.yml +++ b/roles/openshift_persistent_volumes/tasks/pvc.yml @@ -13,5 +13,5 @@ --config={{ mktemp.stdout }}/admin.kubeconfig register: pvc_create_output when: persistent_volume_claims | length > 0 - failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) + failed_when: "('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) and pvc_create_output.rc != 0" changed_when: ('created' in pvc_create_output.stdout) diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md index 1ebeacabf..6079e6016 100644 --- a/roles/openshift_prometheus/README.md +++ b/roles/openshift_prometheus/README.md @@ -31,7 +31,7 @@ For default values, see 
[`defaults/main.yaml`](defaults/main.yaml). e.g ``` -openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=5s', '--storage.tsdb.max-block-duration=6m'] +openshift_prometheus_args=['--storage.tsdb.retention=6h', '--query.timeout=2m'] ``` ## PVC related variables diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index e30108d2c..37a05f3f0 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -7,14 +7,29 @@ openshift_prometheus_namespace: openshift-metrics # defaults hosts for routes openshift_prometheus_hostname: prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} openshift_prometheus_alerts_hostname: alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} +openshift_prometheus_alertmanager_hostname: alertmanager-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} + openshift_prometheus_node_selector: {"region":"infra"} +openshift_prometheus_service_port: 443 +openshift_prometheus_service_targetport: 8443 +openshift_prometheus_service_name: prometheus +openshift_prometheus_alerts_service_targetport: 9443 +openshift_prometheus_alerts_service_name: alerts +openshift_prometheus_alertmanager_service_targetport: 10443 +openshift_prometheus_alertmanager_service_name: alertmanager +openshift_prometheus_serviceaccount_annotations: [] +l_openshift_prometheus_serviceaccount_annotations: + - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' + - serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' + - serviceaccounts.openshift.io/oauth-redirectreference.alertmanager='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}' + # additional prometheus rules file openshift_prometheus_additional_rules_file: null #prometheus application arguments -openshift_prometheus_args: ['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=2m'] +openshift_prometheus_args: ['--storage.tsdb.retention=6h'] # storage # One of ['emptydir', 'pvc'] diff --git a/roles/openshift_prometheus/tasks/facts.yaml b/roles/openshift_prometheus/tasks/facts.yaml new file mode 100644 index 000000000..214089732 --- /dev/null +++ b/roles/openshift_prometheus/tasks/facts.yaml @@ -0,0 +1,10 @@ +--- +# The kubernetes version impacts the prometheus scraping endpoint +# so gathering it before constructing the configmap +- name: get oc version + oc_version: + register: oc_version + +- set_fact: + kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}" + openshift_prometheus_serviceaccount_annotations: "{{ l_openshift_prometheus_serviceaccount_annotations + openshift_prometheus_serviceaccount_annotations|list }}" diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index 749df5152..0b565502f 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -1,4 +1,6 @@ --- +# set facts +- include_tasks: facts.yaml # namespace - name: Add prometheus project @@ -9,7 +11,7 @@ description: Prometheus # secrets -- name: Set alert and prometheus secrets +- name: Set alert, alertmanager and 
prometheus secrets oc_secret: state: present name: "{{ item }}-proxy" @@ -20,30 +22,24 @@ with_items: - prometheus - alerts + - alertmanager # serviceaccount - name: create prometheus serviceaccount oc_serviceaccount: state: present - name: prometheus + name: "{{ openshift_prometheus_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" - # TODO add annotations when supproted - # annotations: - # serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' - # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' - - secrets: - - prometheus-secrets changed_when: no + # TODO remove this when annotations are supported by oc_serviceaccount - name: annotate serviceaccount command: > {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - serviceaccount prometheus - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' - serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' - + serviceaccount {{ openshift_prometheus_service_name }} {{ item }} + with_items: + "{{ openshift_prometheus_serviceaccount_annotations }}" # create clusterrolebinding for prometheus serviceaccount - name: Set cluster-reader permissions for prometheus @@ -52,63 +48,61 @@ namespace: "{{ openshift_prometheus_namespace }}" resource_kind: cluster-role resource_name: cluster-reader - user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus" + user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:{{ openshift_prometheus_service_name }}" + -# create prometheus and alerts services -# TODO join into 1 task with loop -- name: Create prometheus service +- name: create services for prometheus oc_service: - state: present - name: "{{ item.name }}" + name: "{{ openshift_prometheus_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" - selector: - app: prometheus labels: - name: "{{ item.name }}" - # TODO add annotations when supported - # annotations: - # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls" + name: prometheus + annotations: + oprometheus.io/scrape: 'true' + oprometheus.io/scheme: https + service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls ports: - - port: 443 - targetPort: 8443 - with_items: - - name: prometheus + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_service_targetport }}" + protocol: TCP + selector: + app: prometheus -- name: Create alerts service +- name: create services for alert buffer oc_service: - state: present - name: "{{ item.name }}" + name: "{{ openshift_prometheus_alerts_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" + labels: + name: prometheus + annotations: + service.alpha.openshift.io/serving-cert-secret-name: alerts-tls + ports: + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_alerts_service_targetport }}" + protocol: TCP selector: app: prometheus + +- name: create services for alertmanager + oc_service: + name: "{{ openshift_prometheus_alertmanager_service_name }}" + namespace: "{{ openshift_prometheus_namespace 
}}" labels: - name: "{{ item.name }}" - # TODO add annotations when supported - # annotations: - # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls" + name: prometheus + annotations: + service.alpha.openshift.io/serving-cert-secret-name: alertmanager-tls ports: - - port: 443 - targetPort: 9443 - with_items: - - name: alerts - - -# Annotate services with secret name -# TODO remove this when annotations are supported by oc_service -- name: annotate prometheus service - command: > - {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - service prometheus - prometheus.io/scrape='true' - prometheus.io/scheme=https - service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls - -- name: annotate alerts service - command: > - {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls' + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}" + protocol: TCP + selector: + app: prometheus # create prometheus and alerts routes +# TODO: oc_route module should support insecureEdgeTerminationPolicy: Redirect - name: create prometheus and alerts routes oc_route: state: present @@ -122,6 +116,8 @@ host: "{{ openshift_prometheus_hostname }}" - name: alerts host: "{{ openshift_prometheus_alerts_hostname }}" + - name: alertmanager + host: "{{ openshift_prometheus_alertmanager_hostname }}" # Storage - name: create prometheus pvc @@ -169,15 +165,6 @@ path: "{{ tempdir }}/prometheus.additional.rules" register: additional_rules_stat -# The kubernetes version impacts the prometheus scraping endpoint -# so gathering it before constructing the configmap -- name: get oc version - oc_version: - register: oc_version - -- set_fact: - kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}" - - template: src: prometheus.yml.j2 dest: "{{ tempdir }}/prometheus.yml" @@ -219,7 +206,7 @@ - name: Set alertmanager configmap oc_configmap: state: present - name: "prometheus-alerts" + name: "alertmanager" namespace: "{{ openshift_prometheus_namespace }}" from_file: alertmanager.yml: "{{ tempdir }}/alertmanager.yml" diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index b859eb111..66d65a3f2 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -16,9 +16,11 @@ - name: Create templates subdirectory file: state: directory - path: "{{ tempdir }}/templates" + path: "{{ tempdir }}/{{ item }}" mode: 0755 changed_when: False + with_items: + - templates - include_tasks: install_prometheus.yaml when: openshift_prometheus_state == 'present' diff --git a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml b/roles/openshift_prometheus/tasks/uninstall.yaml index d746402db..d746402db 100644 --- a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/uninstall.yaml diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2 index d780550b8..c0abd483b 100644 --- a/roles/openshift_prometheus/templates/prometheus.j2 +++ b/roles/openshift_prometheus/templates/prometheus.j2 @@ -19,7 +19,7 @@ spec: labels: app: prometheus spec: - serviceAccountName: prometheus + serviceAccountName: "{{ openshift_prometheus_service_name }}" {% if 
openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %} nodeSelector: {% for key, value in openshift_prometheus_node_selector.items() %} @@ -47,15 +47,15 @@ spec: cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" {% endif %} ports: - - containerPort: 8443 + - containerPort: {{ openshift_prometheus_service_targetport }} name: web args: - -provider=openshift - - -https-address=:8443 + - -https-address=:{{ openshift_prometheus_service_targetport }} - -http-address= - -email-domain=* - -upstream=http://localhost:9090 - - -client-id=system:serviceaccount:{{ namespace }}:prometheus + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' - -tls-cert=/etc/tls/private/tls.crt @@ -67,9 +67,9 @@ spec: - -skip-auth-regex=^/metrics volumeMounts: - mountPath: /etc/tls/private - name: prometheus-tls + name: prometheus-tls-secret - mountPath: /etc/proxy/secrets - name: prometheus-secrets + name: prometheus-proxy-secret - mountPath: /prometheus name: prometheus-data @@ -104,7 +104,7 @@ spec: - mountPath: /prometheus name: prometheus-data - # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy + # Deploy alert-buffer behind oauth alerts-proxy - name: alerts-proxy image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}" imagePullPolicy: IfNotPresent @@ -124,15 +124,15 @@ spec: cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" {% endif %} ports: - - containerPort: 9443 + - containerPort: {{ openshift_prometheus_alerts_service_targetport }} name: web args: - -provider=openshift - - -https-address=:9443 + - -https-address=:{{ openshift_prometheus_alerts_service_targetport }} - -http-address= - -email-domain=* - -upstream=http://localhost:9099 - - -client-id=system:serviceaccount:{{ namespace }}:prometheus + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' - -tls-cert=/etc/tls/private/tls.crt @@ -143,9 +143,9 @@ spec: - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt volumeMounts: - mountPath: /etc/tls/private - name: alerts-tls + name: alerts-tls-secret - mountPath: /etc/proxy/secrets - name: alerts-secrets + name: alerts-proxy-secret - name: alert-buffer args: @@ -169,11 +169,54 @@ spec: {% endif %} volumeMounts: - mountPath: /alert-buffer - name: alert-buffer-data + name: alerts-data ports: - containerPort: 9099 name: alert-buf + # Deploy alertmanager behind oauth alertmanager-proxy + - name: alertmanager-proxy + image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}" + imagePullPolicy: IfNotPresent + requests: +{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %} + memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_requests is 
defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %} + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}" +{% endif %} + limits: +{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} + memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" +{% endif %} + ports: + - containerPort: {{ openshift_prometheus_alertmanager_service_targetport }} + name: web + args: + - -provider=openshift + - -https-address=:{{ openshift_prometheus_alertmanager_service_targetport }} + - -http-address= + - -email-domain=* + - -upstream=http://localhost:9093 + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} + - -openshift-ca=/etc/pki/tls/cert.pem + - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' + - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -skip-auth-regex=^/metrics + volumeMounts: + - mountPath: /etc/tls/private + name: alertmanager-tls-secret + - mountPath: /etc/proxy/secrets + name: alertmanager-proxy-secret + - name: alertmanager args: - -config.file=/etc/alertmanager/alertmanager.yml @@ -205,14 +248,15 @@ spec: restartPolicy: Always volumes: + - name: prometheus-config configMap: defaultMode: 420 name: prometheus - - name: prometheus-secrets + - name: prometheus-proxy-secret secret: secretName: prometheus-proxy - - name: prometheus-tls + - name: prometheus-tls-secret secret: secretName: prometheus-tls - name: prometheus-data @@ -225,13 +269,19 @@ spec: - name: alertmanager-config configMap: defaultMode: 420 - name: prometheus-alerts - - name: alerts-secrets + name: alertmanager + - name: alertmanager-proxy-secret secret: - secretName: alerts-proxy - - name: alerts-tls + secretName: alertmanager-proxy + - name: alertmanager-tls-secret + secret: + secretName: alertmanager-tls + - name: alerts-tls-secret secret: - secretName: prometheus-alerts-tls + secretName: alerts-tls + - name: alerts-proxy-secret + secret: + secretName: alerts-proxy - name: alertmanager-data {% if openshift_prometheus_alertmanager_storage_type == 'pvc' %} persistentVolumeClaim: @@ -239,7 +289,7 @@ spec: {% else %} emptydir: {} {% endif %} - - name: alert-buffer-data + - name: alerts-data {% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %} persistentVolumeClaim: claimName: {{ openshift_prometheus_alertbuffer_pvc_name }} diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2 index 63430f834..005c2c564 100644 --- a/roles/openshift_prometheus/templates/prometheus.yml.j2 +++ b/roles/openshift_prometheus/templates/prometheus.yml.j2 @@ -1,10 +1,5 @@ rule_files: - - 'prometheus.rules' -{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %} - - 'prometheus.additional.rules' -{% endif %} - - + - '*.rules' # 
A scrape configuration for running Prometheus on a Kubernetes cluster. # This uses separate scrape configs for cluster components (i.e. API server, node) @@ -39,31 +34,11 @@ scrape_configs: action: keep regex: default;kubernetes;https -# Scrape config for nodes. -# -# Each node exposes a /metrics endpoint that contains operational metrics for -# the Kubelet and other components. -- job_name: 'kubernetes-nodes' - - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - # Scrape config for controllers. # # Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for # the controllers. # -# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via -# endpoints. - job_name: 'kubernetes-controllers' scheme: https @@ -87,6 +62,27 @@ scrape_configs: regex: (.+)(?::\d+) replacement: $1:8444 +# Scrape config for nodes. +# +# Each node exposes a /metrics endpoint that contains operational metrics for +# the Kubelet and other components. +- job_name: 'kubernetes-nodes' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + # Drop a very high cardinality metric that is incorrect in 3.7. It will be + # fixed in 3.9. + metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'openshift_sdn_pod_(setup|teardown)_latency(.*)' + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + # Scrape config for cAdvisor. # # Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that @@ -107,6 +103,14 @@ scrape_configs: kubernetes_sd_configs: - role: node + # Exclude a set of high cardinality metrics that can contribute to significant + # memory use in large clusters. These can be selectively enabled as necessary + # for medium or small clusters. + metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'container_(cpu_user_seconds_total|cpu_cfs_periods_total|memory_usage_bytes|memory_swap|memory_working_set_bytes|memory_cache|last_seen|fs_(read_seconds_total|write_seconds_total|sector_(.*)|io_(.*)|reads_merged_total|writes_merged_total)|tasks_state|memory_failcnt|memory_failures_total|spec_memory_swap_limit_bytes|fs_(.*)_bytes_total|spec_(.*))' + relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) @@ -133,38 +137,101 @@ scrape_configs: - role: endpoints relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) 
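For reference, the metric_relabel_configs blocks added to the node and cAdvisor jobs above act after the scrape but before ingestion, so matching series never reach storage. A minimal, self-contained sketch of that drop pattern follows; the job name, target, and regex are illustrative, not taken from this configuration:

scrape_configs:
- job_name: 'example-cardinality-filter'
  static_configs:
  - targets: ['localhost:9100']
  metric_relabel_configs:
  # drop any series whose metric name matches the regex
  - source_labels: [__name__]
    action: drop
    regex: 'container_tasks_state|container_memory_failcnt'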
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + # only scrape infrastructure components + - source_labels: [__meta_kubernetes_namespace] + action: keep + regex: 'default|logging|metrics|kube-.+|openshift|openshift-.+' + # drop infrastructure components managed by other scrape targets + - source_labels: [__meta_kubernetes_service_name] + action: drop + regex: 'prometheus-node-exporter' + # only those that have requested scraping + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+)(?::\d+);(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + +# Scrape config for node-exporter, which is expected to be running on port 9100. +- job_name: 'kubernetes-nodes-exporter' + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + + kubernetes_sd_configs: + - role: node + + metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'node_cpu|node_(disk|scrape_collector)_.+' + # preserve a subset of the network, netstat, vmstat, and filesystem series + - source_labels: [__name__] action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + regex: '(node_(netstat_Ip_.+|vmstat_(nr|thp)_.+|filesystem_(free|size|device_error)|network_(transmit|receive)_(drop|errs)))' + target_label: __name__ + replacement: renamed_$1 + - source_labels: [__name__] + action: drop + regex: 'node_(netstat|vmstat|filesystem|network)_.+' + - source_labels: [__name__] action: replace + regex: 'renamed_(.+)' + target_label: __name__ + replacement: $1 + # drop any partial expensive series + - source_labels: [__name__, device] + action: drop + regex: 'node_network_.+;veth.+' + - source_labels: [__name__, mountpoint] + action: drop + regex: 'node_filesystem_(free|size|device_error);([^/].*|/.+)' + + relabel_configs: + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' target_label: __address__ - regex: (.+)(?::\d+);(\d+) - replacement: $1:$2 - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username] - action: replace - target_label: __basic_auth_username__ - regex: (.+) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password] - action: replace - target_label: __basic_auth_password__ - regex: (.+) + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name + regex: __meta_kubernetes_node_label_(.+) + +# Scrape config for the template service broker +- job_name: 
'openshift-template-service-broker' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + server_name: apiserver.openshift-template-service-broker.svc + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: openshift-template-service-broker;apiserver;https + alerting: alertmanagers: diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml index a6f040831..34ba78404 100644 --- a/roles/openshift_provisioners/defaults/main.yaml +++ b/roles/openshift_provisioners/defaults/main.yaml @@ -1,7 +1,5 @@ --- openshift_provisioners_install_provisioners: True -openshift_provisioners_image_prefix: docker.io/openshift/origin- -openshift_provisioners_image_version: latest openshift_provisioners_efs: False openshift_provisioners_efs_path: /persistentvolumes @@ -10,3 +8,11 @@ openshift_provisioners_efs_nodeselector: "" openshift_provisioners_efs_supplementalgroup: '65534' openshift_provisioners_project: openshift-infra + +openshift_provisioners_image_prefix_dict: + origin: "docker.io/openshift/origin-" + openshift-enterprise: "registry.access.redhat.com/openshift3/ose-" + +openshift_provisioners_image_version_dict: + origin: "latest" + openshift-enterprise: "{{ openshift_image_tag }}" diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml index 4ba26b2b8..d00573b07 100644 --- a/roles/openshift_provisioners/tasks/main.yaml +++ b/roles/openshift_provisioners/tasks/main.yaml @@ -12,6 +12,11 @@ check_mode: no tags: provisioners_init +- name: Set eventrouter image facts + set_fact: + openshift_provisioners_image_prefix: "{{ openshift_provisioners_image_prefix | default(openshift_provisioners_image_prefix_dict[openshift_deployment_type]) }}" + openshift_provisioners_image_version: "{{ openshift_provisioners_image_version | default(openshift_provisioners_image_version_dict[openshift_deployment_type]) }}" + - include_tasks: install_provisioners.yaml when: openshift_provisioners_install_provisioners | default(false) | bool diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml index a4ce53eae..27c8a4b81 100644 --- a/roles/openshift_provisioners/tasks/oc_apply.yaml +++ b/roles/openshift_provisioners/tasks/oc_apply.yaml @@ -15,7 +15,7 @@ apply -f {{ file_name }} -n {{ namespace }} register: generation_apply - failed_when: "'error' in generation_apply.stderr" + failed_when: "'error' in generation_apply.stderr or generation_apply.rc != 0" changed_when: no - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} @@ -36,7 +36,7 @@ delete -f {{ file_name }} -n {{ namespace }} register: generation_delete - failed_when: "'error' in generation_delete.stderr" + failed_when: "'error' in generation_delete.stderr or generation_delete.rc != 0" changed_when: generation_delete.rc == 0 when: generation_apply.rc != 0 @@ -46,6 +46,6 @@ apply -f {{ file_name }} -n {{ namespace }} register: generation_apply - failed_when: "'error' in generation_apply.stderr" + failed_when: "'error' in generation_apply.stderr or generation_apply.rc | int != 0" changed_when: generation_apply.rc == 0 when: generation_apply.rc != 0 diff --git 
a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml index 795b8ee60..b1ddbc07a 100644 --- a/roles/openshift_sanitize_inventory/tasks/deprecations.yml +++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml @@ -2,15 +2,18 @@ - name: Check for usage of deprecated variables set_fact: - __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will be no longer be used in the next minor release. Please update your inventory accordingly.']" + __deprecation_message: "{{ __deprecation_message | default( __deprecation_header ) }} \n\t{{ item }}" when: - hostvars[inventory_hostname][item] is defined with_items: "{{ __warn_deprecated_vars }}" - block: - debug: msg="{{__deprecation_message}}" - - pause: - seconds: "{{ 10 }}" + - run_once: true + set_stats: + data: + installer_phase_initialize: + message: "{{ __deprecation_message }}" when: - __deprecation_message | default ('') | length > 0 diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml index 1c4984467..be0715ab5 100644 --- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml +++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml @@ -45,7 +45,8 @@ - name: Ensure the hosted registry's GlusterFS storage is configured correctly when: - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs'] - - openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips != '' + - openshift_hosted_registry_storage_glusterfs_ips is defined + - openshift_hosted_registry_storage_glusterfs_ips != [] - "'glusterfs_registry' in groups | default([])" fail: msg: |- diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index df15948d2..51c6e0a64 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -1,6 +1,6 @@ --- -__deprecation_header: "[DEPRECATION WARNING]:" +__deprecation_header: "[DEPRECATION WARNING]: The following are deprecated variables and will be no longer be used in the next minor release. Please update your inventory accordingly." 
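The rewritten deprecation check above relies on facts persisting across loop iterations: each deprecated variable that is set gets appended to the message built by the previous iteration, and set_stats then surfaces the final string in the installer phase summary instead of pausing the run. A stripped-down sketch of that accumulation pattern, with illustrative variable and item names:

- name: Collect warnings for any deprecated variables that are set
  set_fact:
    # default() seeds the header on the first iteration; later iterations
    # append to the value left by the previous one
    warning_text: "{{ warning_text | default('DEPRECATION WARNING:') }} \n\t{{ item }}"
  when: hostvars[inventory_hostname][item] is defined
  with_items:
    - some_deprecated_var
    - another_deprecated_var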
# this is a list of variables that we will be deprecating within the next minor release, this list should be expected to change from release to release __warn_deprecated_vars: diff --git a/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml new file mode 100644 index 000000000..28abcbcfc --- /dev/null +++ b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml @@ -0,0 +1,86 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: system:service-catalog:aggregate-to-admin +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - create + - update + - delete + - get + - list + - watch + - patch +- apiGroups: + - "settings.k8s.io" + attributeRestrictions: null + resources: + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: system:service-catalog:aggregate-to-edit +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - create + - update + - delete + - get + - list + - watch + - patch +- apiGroups: + - "settings.k8s.io" + attributeRestrictions: null + resources: + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:service-catalog:aggregate-to-view +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - get + - list + - watch diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 9b38a85c4..4d06c1872 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -74,74 +74,17 @@ template_name: kube-system-service-catalog-role-bindings namespace: kube-system -- oc_obj: - name: edit - kind: clusterrole - state: list - register: edit_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/edit - template: - src: sc_admin_edit_role_patching.j2 - dest: "{{ mktemp.stdout }}/edit_sc_patch.yml" - vars: - original_content: "{{ edit_yaml.results.results[0] | to_yaml }}" - when: - - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update edit role for service catalog and pod preset access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml - when: - - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 
'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- oc_obj: - name: admin - kind: clusterrole - state: list - register: admin_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/admin - template: - src: sc_admin_edit_role_patching.j2 - dest: "{{ mktemp.stdout }}/admin_sc_patch.yml" - vars: - original_content: "{{ admin_yaml.results.results[0] | to_yaml }}" - when: - - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update admin role for service catalog and pod preset access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml - when: - - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- oc_obj: - name: view - kind: clusterrole - state: list - register: view_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/view - template: - src: sc_view_role_patching.j2 - dest: "{{ mktemp.stdout }}/view_sc_patch.yml" - vars: - original_content: "{{ view_yaml.results.results[0] | to_yaml }}" - when: - - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update view role for service catalog access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml - when: - - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) +- copy: + src: openshift_catalog_clusterroles.yml + dest: "{{ mktemp.stdout }}/openshift_catalog_clusterroles.yml" + +- name: Apply Service Catalog cluster roles + retries: 5 + delay: 2 + register: task_result + until: task_result.rc == 0 + shell: > + {{ openshift_client_binary }} auth reconcile --config={{ openshift.common.config_base }}/master/admin.kubeconfig -f {{ mktemp.stdout}}/openshift_catalog_clusterroles.yml - oc_adm_policy_user: namespace: kube-service-catalog diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 index e345df32c..a18d29ef0 100644 --- a/roles/openshift_service_catalog/templates/api_server.j2 +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -49,7 +49,7 @@ spec: - OriginatingIdentity=true image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} command: ["/usr/bin/service-catalog"] - imagePullPolicy: Always + imagePullPolicy: IfNotPresent 
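The reconcile task above pairs register/until with retries and delay so a transient failure (for example, while the aggregated API is still becoming available) does not abort the install. The same retry shape in isolation, with an illustrative file path and retry counts:

- name: Reconcile cluster roles from a file, retrying transient failures
  shell: >
    oc auth reconcile
    --config=/etc/origin/master/admin.kubeconfig
    -f /tmp/openshift_catalog_clusterroles.yml
  register: reconcile_result
  until: reconcile_result.rc == 0
  retries: 5
  delay: 2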
name: apiserver ports: - containerPort: 6443 diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2 index c61e05f73..6d3ee7d01 100644 --- a/roles/openshift_service_catalog/templates/controller_manager.j2 +++ b/roles/openshift_service_catalog/templates/controller_manager.j2 @@ -44,7 +44,7 @@ spec: {% endif %} image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} command: ["/usr/bin/service-catalog"] - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: controller-manager ports: - containerPort: 8080 diff --git a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 deleted file mode 100644 index 59cceafcf..000000000 --- a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 +++ /dev/null @@ -1,27 +0,0 @@ -{{ original_content }} -- apiGroups: - - "servicecatalog.k8s.io" - attributeRestrictions: null - resources: - - serviceinstances - - servicebindings - verbs: - - create - - update - - delete - - get - - list - - watch - - patch -- apiGroups: - - "settings.k8s.io" - attributeRestrictions: null - resources: - - podpresets - verbs: - - create - - update - - delete - - get - - list - - watch diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 deleted file mode 100644 index 838993854..000000000 --- a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{{ original_content }} -- apiGroups: - - "servicecatalog.k8s.io" - attributeRestrictions: null - resources: - - serviceinstances - - servicebindings - verbs: - - get - - list - - watch diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index f7bd58db3..70a89b0ba 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -73,49 +73,51 @@ Role Variables This role has the following variables that control the integration of a GlusterFS cluster into a new or existing OpenShift cluster: -| Name | Default value | Description | -|--------------------------------------------------|-------------------------|-----------------------------------------| -| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready -| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources -| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized -| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names -| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name -| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. 
-| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster -| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default -| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' -| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods -| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service -| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7' -| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod -| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned -| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes -| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes -| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service -| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7' -| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster=s3 pod -| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment -| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment -| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment -| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default -| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage -| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default -| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage -| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. 
**WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** -| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized -| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible -| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' -| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods -| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin -| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes -| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service. -| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode -| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes -| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi -| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi -| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi -| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path +| Name | Default value | Description | +|--------------------------------------------------------|-------------------------|-----------------------------------------| +| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready +| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources +| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized +| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names +| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name +| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. 
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster +| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default +| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' +| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods +| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service +| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7' +| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod +| openshift_storage_glusterfs_block_host_vol_create | True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned +| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes +| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes +| openshift_storage_glusterfs_block_storageclass | False | Automatically create a StorageClass for each Gluster Block cluster +| openshift_storage_glusterfs_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default +| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service +| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7' +| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster=s3 pod +| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment +| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment +| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment +| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default +| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage +| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default +| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage +| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. 
**WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** +| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized +| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible +| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' +| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods +| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin +| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes +| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi +| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service. +| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode +| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes +| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi +| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi +| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi +| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path | openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` @@ -126,14 +128,16 @@ registry. These variables start with the prefix values in their corresponding non-registry variables. 
The following variables are an exception: -| Name | Default value | Description | -|-----------------------------------------------------------|-----------------------|-----------------------------------------| -| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs' -| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters -| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties -| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default -| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above -| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above +| Name | Default value | Description | +|-----------------------------------------------------------------|-----------------------|-----------------------------------------| +| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs' +| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters +| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties +| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default +| openshift_storage_glusterfs_registry_block_storageclass | False | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties +| openshift_storage_glusterfs_registry_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default +| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above +| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md): diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index 4cbe262d2..7e751cc7a 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -14,6 +14,8 @@ openshift_storage_glusterfs_block_version: 'latest' openshift_storage_glusterfs_block_host_vol_create: True openshift_storage_glusterfs_block_host_vol_size: 100 openshift_storage_glusterfs_block_host_vol_max: 15 +openshift_storage_glusterfs_block_storageclass: False +openshift_storage_glusterfs_block_storageclass_default: False openshift_storage_glusterfs_s3_deploy: True openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}" openshift_storage_glusterfs_s3_version: 'latest' @@ -61,6 +63,8 @@ openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_gluste 
openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}" openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}" openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}" +openshift_storage_glusterfs_registry_block_storageclass: False +openshift_storage_glusterfs_registry_block_storageclass_default: False openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}" openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}" openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}" @@ -103,3 +107,9 @@ r_openshift_storage_glusterfs_os_firewall_allow: port: "24008/tcp" - service: glusterfs_bricks port: "49152-49251/tcp" +- service: glusterblockd + port: "24010/tcp" +- service: iscsi-targets + port: "3260/tcp" +- service: rpcbind + port: "111/tcp" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index 001578406..a5fdae803 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -315,5 +315,31 @@ - include_tasks: glusterblock_deploy.yml when: glusterfs_block_deploy +- block: + - name: Create heketi block secret + oc_secret: + namespace: "{{ glusterfs_namespace }}" + state: present + name: "heketi-{{ glusterfs_name }}-admin-secret-block" + type: "gluster.org/glusterblock" + force: True + contents: + - path: key + data: "{{ glusterfs_heketi_admin_key }}" + when: glusterfs_heketi_admin_key is defined + - name: Generate Gluster Block StorageClass file + template: + src: "{{ openshift.common.examples_content_version }}/gluster-block-storageclass.yml.j2" + dest: "{{ mktemp.stdout }}/gluster-block-storageclass.yml" + + - name: Create Gluster Block StorageClass + oc_obj: + state: present + kind: storageclass + name: "glusterfs-{{ glusterfs_name }}-block" + files: + - "{{ mktemp.stdout }}/gluster-block-storageclass.yml" + when: glusterfs_block_storageclass + - include_tasks: gluster_s3_deploy.yml when: glusterfs_s3_deploy diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index a374df0ce..92de1b64d 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -17,6 +17,8 @@ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}" glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}" glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}" + glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_block_storageclass | bool }}" + glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_block_storageclass_default | bool }}" glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}" glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}" glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 544a6f491..befacb04f 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ 
b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -17,6 +17,8 @@ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}" glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}" glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}" + glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_registry_block_storageclass | bool }}" + glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_registry_block_storageclass_default | bool }}" glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}" glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}" glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}" diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf index dd4d6e6f7..bcc02e217 100644 --- a/roles/openshift_storage_glusterfs/templates/glusterfs.conf +++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf @@ -1,4 +1,7 @@ #{{ ansible_managed }} dm_thin_pool dm_snapshot -dm_mirror
\ No newline at end of file +dm_mirror +#glusterblock +dm_multipath +target_core_user diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 new file mode 100644 index 000000000..02ed8fa8d --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }}-block +{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} +provisioner: gluster.org/glusterblock +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" + chapauthenabled: "true" + hacount: "3" +{% if glusterfs_heketi_admin_key is defined %} + restsecretnamespace: "{{ glusterfs_namespace }}" + restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 new file mode 100644 index 000000000..02ed8fa8d --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }}-block +{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} +provisioner: gluster.org/glusterblock +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" + chapauthenabled: "true" + hacount: "3" +{% if glusterfs_heketi_admin_key is defined %} + restsecretnamespace: "{{ glusterfs_namespace }}" + restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 new file mode 100644 index 000000000..02ed8fa8d --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }}-block +{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} +provisioner: gluster.org/glusterblock +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" + chapauthenabled: "true" + hacount: "3" +{% if glusterfs_heketi_admin_key is defined %} + restsecretnamespace: "{{ glusterfs_namespace }}" + restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block" +{%- endif -%} diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index 2ec8db019..13bd5370c 100644 --- 
a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -1,8 +1,8 @@ -{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }} -{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }} -{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }} -{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }} -{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }} -{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }} -{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }} -{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }} +"{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}" {{ openshift_hosted_registry_storage_nfs_options }} +"{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}" {{ openshift_metrics_storage_nfs_options }} +"{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}" {{ openshift_logging_storage_nfs_options }} +"{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}" {{ openshift_loggingops_storage_nfs_options }} +"{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}" {{ openshift_hosted_etcd_storage_nfs_options }} +"{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}" {{ openshift_prometheus_storage_nfs_options }} +"{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}" {{ openshift_prometheus_alertmanager_storage_nfs_options }} +"{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}" {{ openshift_prometheus_alertbuffer_storage_nfs_options }} diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml index e2e6538c9..513dff045 100644 --- a/roles/openshift_version/defaults/main.yml +++ b/roles/openshift_version/defaults/main.yml @@ -10,3 +10,4 @@ openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_typ openshift_use_crio_only: False l_first_master_version_task_file: "{{ openshift_is_containerized | ternary('first_master_containerized_version.yml', 'first_master_rpm_version.yml') }}" +l_force_image_tag_to_version: False diff --git a/roles/openshift_version/tasks/check_available_rpms.yml b/roles/openshift_version/tasks/check_available_rpms.yml index bdbc63d27..fea0daf77 100644 --- a/roles/openshift_version/tasks/check_available_rpms.yml +++ b/roles/openshift_version/tasks/check_available_rpms.yml @@ -1,7 +1,7 @@ --- - name: Get available {{ openshift_service_type}} version repoquery: - name: "{{ openshift_service_type}}" + 
name: "{{ openshift_service_type}}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}" ignore_excluders: true register: rpm_results diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml index 374725086..e01a56dc1 100644 --- a/roles/openshift_version/tasks/first_master.yml +++ b/roles/openshift_version/tasks/first_master.yml @@ -24,7 +24,9 @@ - block: - debug: - msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}" + msg: "openshift_image_tag set to v{{ openshift_version }}" - set_fact: openshift_image_tag: v{{ openshift_version }} - when: openshift_image_tag is not defined + when: > + openshift_image_tag is not defined + or l_force_image_tag_to_version | bool diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml index e02a75eab..3ed1d2cfe 100644 --- a/roles/openshift_version/tasks/first_master_containerized_version.yml +++ b/roles/openshift_version/tasks/first_master_containerized_version.yml @@ -7,6 +7,7 @@ when: - openshift_image_tag is defined - openshift_version is not defined + - not (openshift_version_reinit | default(false)) - name: Set containerized version to configure if openshift_release specified set_fact: @@ -20,7 +21,7 @@ docker run --rm {{ openshift_cli_image }}:latest version register: cli_image_version when: - - openshift_version is not defined + - openshift_version is not defined or openshift_version_reinit | default(false) - not openshift_use_crio_only # Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a) @@ -34,7 +35,7 @@ - set_fact: openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" - when: openshift_version is not defined + when: openshift_version is not defined or openshift_version_reinit | default(false) # If we got an openshift_version like "3.2", lookup the latest 3.2 container version # and use that value instead. 
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml index 264baca65..5d92f90c6 100644 --- a/roles/openshift_version/tasks/first_master_rpm_version.yml +++ b/roles/openshift_version/tasks/first_master_rpm_version.yml @@ -6,6 +6,7 @@ when: - openshift_pkg_version is defined - openshift_version is not defined + - not (openshift_version_reinit | default(false)) # These tasks should only be run against masters and nodes - name: Set openshift_version for rpm installation @@ -13,4 +14,7 @@ - set_fact: openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}" - when: openshift_version is not defined + when: openshift_version is not defined or ( openshift_version_reinit | default(false) ) +- set_fact: + openshift_pkg_version: "-{{ rpm_results.results.versions.available_versions.0 }}" + when: openshift_version_reinit | default(false) diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml index fbeb22d8b..eddd5ff42 100644 --- a/roles/openshift_version/tasks/masters_and_nodes.yml +++ b/roles/openshift_version/tasks/masters_and_nodes.yml @@ -6,9 +6,12 @@ include_tasks: check_available_rpms.yml - name: Fail if rpm version and docker image version are different fail: - msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}" + msg: "OCP rpm version {{ rpm_results.results.versions.available_versions.0 }} is different from OCP image version {{ openshift_version }}" # Both versions have the same string representation - when: rpm_results.results.versions.available_versions.0 != openshift_version + when: + - openshift_version not in rpm_results.results.versions.available_versions.0 + - openshift_version_reinit | default(false) + # block when when: not openshift_is_atomic | bool diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml index 4f395398c..627db393a 100644 --- a/roles/openshift_web_console/defaults/main.yml +++ b/roles/openshift_web_console/defaults/main.yml @@ -1,3 +1,2 @@ --- -# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters -openshift_web_console_nodeselector: {"region":"infra"} +openshift_web_console_nodeselector: {"node-role.kubernetes.io/master":"true"} diff --git a/roles/openshift_web_console/files/console-config.yaml b/roles/openshift_web_console/files/console-config.yaml new file mode 100644 index 000000000..55c650fbe --- /dev/null +++ b/roles/openshift_web_console/files/console-config.yaml @@ -0,0 +1,24 @@ +apiVersion: webconsole.config.openshift.io/v1 +kind: WebConsoleConfiguration +clusterInfo: + consolePublicURL: https://127.0.0.1:8443/console/ + loggingPublicURL: "" + logoutPublicURL: "" + masterPublicURL: https://127.0.0.1:8443 + metricsPublicURL: "" +extensions: + scriptURLs: [] + stylesheetURLs: [] + properties: null +features: + inactivityTimeoutMinutes: 0 + clusterResourceOverridesEnabled: false +servingInfo: + bindAddress: 0.0.0.0:8443 + bindNetwork: tcp4 + certFile: /var/serving-cert/tls.crt + clientCA: "" + keyFile: /var/serving-cert/tls.key + maxRequestsInFlight: 0 + namedCertificates: null + requestTimeoutSeconds: 0 diff --git a/roles/openshift_web_console/files/console-rbac-template.yaml b/roles/openshift_web_console/files/console-rbac-template.yaml new file mode 100644 index 000000000..9ee117199 --- /dev/null +++ 
b/roles/openshift_web_console/files/console-rbac-template.yaml @@ -0,0 +1,38 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: web-console-server-rbac +parameters: +- name: NAMESPACE + # This namespace cannot be changed. Only `openshift-web-console` is supported. + value: openshift-web-console +objects: + + +# allow grant powers to the webconsole server for cluster inspection +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRole + metadata: + name: system:openshift:web-console-server + rules: + - apiGroups: + - "servicecatalog.k8s.io" + resources: + - clusterservicebrokers + verbs: + - get + - list + - watch + +# Grant the service account for the web console +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: system:openshift:web-console-server + roleRef: + kind: ClusterRole + name: system:openshift:web-console-server + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: webconsole diff --git a/roles/openshift_web_console/files/console-template.yaml b/roles/openshift_web_console/files/console-template.yaml new file mode 100644 index 000000000..547e7a265 --- /dev/null +++ b/roles/openshift_web_console/files/console-template.yaml @@ -0,0 +1,127 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: openshift-web-console + annotations: + openshift.io/display-name: OpenShift Web Console + description: The server for the OpenShift web console. + iconClass: icon-openshift + tags: openshift,infra + openshift.io/documentation-url: https://github.com/openshift/origin-web-console-server + openshift.io/support-url: https://access.redhat.com + openshift.io/provider-display-name: Red Hat, Inc. +parameters: +- name: IMAGE + value: openshift/origin-web-console:latest +- name: NAMESPACE + # This namespace cannot be changed. Only `openshift-web-console` is supported. 
+ value: openshift-web-console +- name: LOGLEVEL + value: "0" +- name: API_SERVER_CONFIG +- name: NODE_SELECTOR + value: "{}" +- name: REPLICA_COUNT + value: "1" +objects: + +# to create the web console server +- apiVersion: apps/v1beta1 + kind: Deployment + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + webconsole: "true" + spec: + replicas: "${{REPLICA_COUNT}}" + strategy: + type: Recreate + template: + metadata: + name: webconsole + labels: + webconsole: "true" + spec: + serviceAccountName: webconsole + containers: + - name: webconsole + image: ${IMAGE} + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/origin-web-console" + - "--audit-log-path=-" + - "-v=${LOGLEVEL}" + - "--config=/var/webconsole-config/webconsole-config.yaml" + ports: + - containerPort: 8443 + volumeMounts: + - mountPath: /var/serving-cert + name: serving-cert + - mountPath: /var/webconsole-config + name: webconsole-config + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + livenessProbe: + httpGet: + path: / + port: 8443 + scheme: HTTPS + resources: + requests: + cpu: 100m + memory: 100Mi + nodeSelector: "${{NODE_SELECTOR}}" + volumes: + - name: serving-cert + secret: + defaultMode: 400 + secretName: webconsole-serving-cert + - name: webconsole-config + configMap: + defaultMode: 440 + name: webconsole-config + +# to create the config for the web console +- apiVersion: v1 + kind: ConfigMap + metadata: + namespace: ${NAMESPACE} + name: webconsole-config + labels: + app: openshift-web-console + data: + webconsole-config.yaml: ${API_SERVER_CONFIG} + +# to be able to assign powers to the process +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + +# to be able to expose web console inside the cluster +- apiVersion: v1 + kind: Service + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + annotations: + service.alpha.openshift.io/serving-cert-secret-name: webconsole-serving-cert + prometheus.io/scrape: "true" + prometheus.io/scheme: https + spec: + selector: + webconsole: "true" + ports: + - name: https + port: 443 + targetPort: 8443 diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml index 12916961b..f79a05c94 100644 --- a/roles/openshift_web_console/tasks/install.yml +++ b/roles/openshift_web_console/tasks/install.yml @@ -21,43 +21,128 @@ node_selector: - "" -- name: Make temp directory for asset config files +- name: Make temp directory for web console templates command: mktemp -d /tmp/console-ansible-XXXXXX register: mktemp changed_when: False -- name: Copy asset config template to temp directory +- name: Copy admin client config + command: > + cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: false + +- name: Copy web console templates to temp directory copy: - src: "{{ __console_files_location }}/{{ item }}" + src: "{{ item }}" dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - "{{ __console_template_file }}" + - "{{ __console_rbac_file }}" - "{{ __console_config_file }}" -- name: Update asset config properties - yedit: - src: "{{ mktemp.stdout }}/{{ __console_config_file }}" - edits: - - key: logoutURL - value: "{{ openshift.master.logout_url | default('') }}" - - key: publicURL - # Must have a trailing slash - value: "{{ openshift.master.public_console_url }}/" - - key: masterPublicURL - 
value: "{{ openshift.master.public_api_url }}" +# Check if an existing webconsole-config config map exists. If so, use those +# contents so we don't overwrite changes. +- name: Read the existing web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: list + register: webconsole_config_map + +- set_fact: + existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}" + +- name: Copy the existing web console config to temp directory + copy: + content: "{{ existing_config_map_data['webconsole-config.yaml'] }}" + dest: "{{ mktemp.stdout }}/{{ __console_config_file }}" + when: existing_config_map_data['webconsole-config.yaml'] is defined + +# Generate a new config when a config map is not defined. +- when: existing_config_map_data['webconsole-config.yaml'] is not defined + block: + # Migrate the previous master-config.yaml asset config if it exists into the new + # web console config config map. + - name: Read existing assetConfig in master-config.yaml + slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: master_config_output + + - set_fact: + config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}" + + - set_fact: + cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}" + + # Update properties in the config template based on inventory vars when the + # asset config does not exist. + - name: Set web console config properties from inventory variables + yedit: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + edits: + - key: clusterInfo#consolePublicURL + # Must have a trailing slash + value: "{{ openshift.master.public_console_url }}/" + - key: clusterInfo#masterPublicURL + value: "{{ openshift.master.public_api_url }}" + - key: clusterInfo#logoutPublicURL + value: "{{ openshift.master.logout_url | default('') }}" + - key: features#inactivityTimeoutMinutes + value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}" + - key: features#clusterResourceOverridesEnabled + value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}" + - key: extensions#scriptURLs + value: "{{ openshift_web_console_extension_script_urls | default([]) }}" + - key: extensions#stylesheetURLs + value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}" + - key: extensions#properties + value: "{{ openshift_web_console_extension_properties | default({}) }}" + separator: '#' + state: present + when: config_to_migrate.assetConfig is not defined + + - name: Migrate assetConfig from master-config.yaml + yedit: + src: "{{ mktemp.stdout }}/{{ __console_config_file }}" + edits: + - key: clusterInfo#consolePublicURL + value: "{{ config_to_migrate.assetConfig.publicURL }}" + - key: clusterInfo#masterPublicURL + value: "{{ config_to_migrate.assetConfig.masterPublicURL }}" + - key: clusterInfo#logoutPublicURL + value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}" + - key: clusterInfo#metricsPublicURL + value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}" + - key: clusterInfo#loggingPublicURL + value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}" + - key: servingInfo#maxRequestsInFlight + value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}" + - key: 
servingInfo#requestTimeoutSeconds + value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}" + - key: features#clusterResourceOverridesEnabled + value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}" + separator: '#' + state: present + when: config_to_migrate.assetConfig is defined - slurp: src: "{{ mktemp.stdout }}/{{ __console_config_file }}" - register: config + register: updated_console_config + +- name: Reconcile with the web console RBAC file + shell: > + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" --config={{ mktemp.stdout }}/admin.kubeconfig + | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f - -- name: Apply template file +- name: Apply the web console template file shell: > {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}" - --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" + --param API_SERVER_CONFIG="{{ updated_console_config['content'] | b64decode }}" --param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}" --param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }} --param REPLICA_COUNT="{{ openshift_web_console_replica_count }}" - | {{ openshift_client_binary }} apply -f - + --config={{ mktemp.stdout }}/admin.kubeconfig + | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f - - name: Verify that the web console is running command: > diff --git a/roles/openshift_web_console/tasks/remove_old_asset_config.yml b/roles/openshift_web_console/tasks/remove_old_asset_config.yml new file mode 100644 index 000000000..34158150c --- /dev/null +++ b/roles/openshift_web_console/tasks/remove_old_asset_config.yml @@ -0,0 +1,19 @@ +--- +# Remove the obsolete assetConfig stanza from master-config.yaml. Since the +# web console has been split out into a separate deployment, those settings +# are no longer used. +- name: Remove assetConfig from master-config.yaml + yedit: + state: absent + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: assetConfig + +# This file was written by wire_aggregator.yml. It is no longer needed since +# the web console now discovers if the template service broker is running on +# startup. Remove the file if it exists. +- name: Remove obsolete web console / service catalog extension file + file: + state: absent + # Hard-code the path instead of using `openshift.common.config_base` since + # the path is hard-coded in wire_aggregator.yml. + path: /etc/origin/master/openshift-ansible-catalog-console.js diff --git a/roles/openshift_web_console/tasks/rollout_console.yml b/roles/openshift_web_console/tasks/rollout_console.yml new file mode 100644 index 000000000..75682ba1d --- /dev/null +++ b/roles/openshift_web_console/tasks/rollout_console.yml @@ -0,0 +1,20 @@ +--- +- name: Check if console deployment exists + oc_obj: + kind: deployments + name: webconsole + namespace: openshift-web-console + state: list + register: console_deployment + +# There's currently no command to trigger a rollout for a k8s deployment +# without changing the pod spec. Add an annotation to force a rollout. 
+- name: Rollout updated web console deployment + oc_edit: + kind: deployments + name: webconsole + namespace: openshift-web-console + separator: '#' + content: + spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}" + when: console_deployment.results.results.0 | length > 0 diff --git a/roles/openshift_web_console/tasks/update_asset_config.yml b/roles/openshift_web_console/tasks/update_asset_config.yml deleted file mode 100644 index 0992b32e1..000000000 --- a/roles/openshift_web_console/tasks/update_asset_config.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -# This task updates asset config values in the webconsole-config config map in -# the openshift-web-console namespace. The values to set are pased in the -# variable `asset_config_edits`, which is an array of objects with `key` and -# `value` properties in the same format as `yedit` module `edits`. Only -# properties passed are updated. -# -# Note that this triggers a redeployment on the console and a brief downtime -# since it uses a `Recreate` strategy. -# -# Example usage: -# -# - include_role: -# name: openshift_web_console -# tasks_from: update_asset_config.yml -# vars: -# asset_config_edits: -# - key: loggingPublicURL -# value: "https://{{ openshift_logging_kibana_hostname }}" -# when: openshift_web_console_install | default(true) | bool - -- name: Read web console config map - oc_configmap: - namespace: openshift-web-console - name: webconsole-config - state: list - register: webconsole_config - -- name: Make temp directory - command: mktemp -d /tmp/console-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Copy asset config to temp file - copy: - content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}" - dest: "{{ mktemp.stdout }}/webconsole-config.yaml" - -- name: Change asset config properties - yedit: - src: "{{ mktemp.stdout }}/webconsole-config.yaml" - edits: "{{asset_config_edits}}" - -- name: Update web console config map - oc_configmap: - namespace: openshift-web-console - name: webconsole-config - state: present - from_file: - webconsole-config.yaml: "{{ mktemp.stdout }}/webconsole-config.yaml" - -- name: Remove temp directory - file: - state: absent - name: "{{ mktemp.stdout }}" - changed_when: False - -# There's currently no command to trigger a rollout for a k8s deployment -# without changing the pod spec. Add an annotation to force a rollout after -# the config map has been edited. -- name: Rollout updated web console deployment - oc_edit: - kind: deployments - name: webconsole - namespace: openshift-web-console - separator: '#' - content: - spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}" diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml new file mode 100644 index 000000000..967222ea4 --- /dev/null +++ b/roles/openshift_web_console/tasks/update_console_config.yml @@ -0,0 +1,67 @@ +--- +# This task updates asset config values in the webconsole-config config map in +# the openshift-web-console namespace. The values to set are pased in the +# variable `console_config_edits`, which is an array of objects with `key` and +# `value` properties in the same format as `yedit` module `edits`. Only +# properties passed are updated. The separator for nested properties is `#`. +# +# Note that this triggers a redeployment on the console and a brief downtime +# since it uses a `Recreate` strategy. 
+# +# Example usage: +# +# - include_role: +# name: openshift_web_console +# tasks_from: update_console_config.yml +# vars: +# console_config_edits: +# - key: clusterInfo#loggingPublicURL +# value: "https://{{ openshift_logging_kibana_hostname }}" +# when: openshift_web_console_install | default(true) | bool + +- name: Read the existing web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: list + register: webconsole_config_map + +- set_fact: + existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}" + +- when: existing_config_map_data['webconsole-config.yaml'] is defined + block: + - name: Make temp directory + command: mktemp -d /tmp/console-ansible-XXXXXX + register: mktemp_console + changed_when: False + + - name: Copy the existing web console config to temp directory + copy: + content: "{{ existing_config_map_data['webconsole-config.yaml'] }}" + dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml" + + - name: Change web console config properties + yedit: + src: "{{ mktemp_console.stdout }}/webconsole-config.yaml" + edits: "{{console_config_edits}}" + separator: '#' + state: present + + - name: Update web console config map + oc_configmap: + namespace: openshift-web-console + name: webconsole-config + state: present + from_file: + webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml" + register: update_console_config_map + + - name: Remove temp directory + file: + state: absent + name: "{{ mktemp_console.stdout }}" + changed_when: False + + - include_tasks: rollout_console.yml + when: update_console_config_map.changed | bool diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml index 7adb8a0d0..42d331ac5 100644 --- a/roles/openshift_web_console/vars/default_images.yml +++ b/roles/openshift_web_console/vars/default_images.yml @@ -1,4 +1,4 @@ --- -__openshift_web_console_prefix: "docker.io/openshift/" +__openshift_web_console_prefix: "docker.io/openshift/origin-" __openshift_web_console_version: "latest" -__openshift_web_console_image_name: "origin-web-console" +__openshift_web_console_image_name: "web-console" diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml index 80bc56a17..72bff5d01 100644 --- a/roles/openshift_web_console/vars/main.yml +++ b/roles/openshift_web_console/vars/main.yml @@ -1,5 +1,4 @@ --- -__console_files_location: "../../../files/origin-components/" - __console_template_file: "console-template.yaml" +__console_rbac_file: "console-rbac-template.yaml" __console_config_file: "console-config.yaml" diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml index 721ac1d27..375c22067 100644 --- a/roles/openshift_web_console/vars/openshift-enterprise.yml +++ b/roles/openshift_web_console/vars/openshift-enterprise.yml @@ -1,4 +1,4 @@ --- -__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/" +__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/ose-" __openshift_web_console_version: "v3.9" -__openshift_web_console_image_name: "ose-web-console" +__openshift_web_console_image_name: "web-console" diff --git a/roles/os_firewall/tasks/firewalld.yml b/roles/os_firewall/tasks/firewalld.yml index 4eae31596..fa933da51 100644 --- a/roles/os_firewall/tasks/firewalld.yml +++ b/roles/os_firewall/tasks/firewalld.yml @@ -2,7 +2,9 @@ - name: Fail - 
Firewalld is not supported on Atomic Host fail: msg: "Firewalld is not supported on Atomic Host" - when: r_os_firewall_is_atomic | bool + when: + - r_os_firewall_is_atomic | bool + - not openshift_enable_unsupported_configurations | default(false) - name: Install firewalld packages package: @@ -10,6 +12,7 @@ state: present register: result until: result is succeeded + when: not r_os_firewall_is_atomic | bool - name: Ensure iptables services are not enabled systemd: diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml index c32872d24..3465832cc 100644 --- a/roles/template_service_broker/defaults/main.yml +++ b/roles/template_service_broker/defaults/main.yml @@ -3,4 +3,4 @@ template_service_broker_remove: False template_service_broker_install: True openshift_template_service_broker_namespaces: ['openshift'] -template_service_broker_selector: { "region": "infra" } +template_service_broker_selector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}" diff --git a/roles/template_service_broker/files/apiserver-config.yaml b/roles/template_service_broker/files/apiserver-config.yaml new file mode 100644 index 000000000..e4048d1da --- /dev/null +++ b/roles/template_service_broker/files/apiserver-config.yaml @@ -0,0 +1,4 @@ +kind: TemplateServiceBrokerConfig +apiVersion: config.templateservicebroker.openshift.io/v1 +templateNamespaces: +- openshift diff --git a/roles/template_service_broker/files/apiserver-template.yaml b/roles/template_service_broker/files/apiserver-template.yaml new file mode 100644 index 000000000..4dd9395d0 --- /dev/null +++ b/roles/template_service_broker/files/apiserver-template.yaml @@ -0,0 +1,125 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-apiserver +parameters: +- name: IMAGE + value: openshift/origin-template-service-broker:latest +- name: NAMESPACE + value: openshift-template-service-broker +- name: LOGLEVEL + value: "0" +- name: API_SERVER_CONFIG + value: | + kind: TemplateServiceBrokerConfig + apiVersion: config.templateservicebroker.openshift.io/v1 + templateNamespaces: + - openshift +- name: NODE_SELECTOR + value: "{}" +objects: + +# to create the tsb server +- apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + namespace: ${NAMESPACE} + name: apiserver + labels: + apiserver: "true" + spec: + template: + metadata: + name: apiserver + labels: + apiserver: "true" + spec: + serviceAccountName: apiserver + containers: + - name: c + image: ${IMAGE} + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/template-service-broker" + - "start" + - "template-service-broker" + - "--secure-port=8443" + - "--audit-log-path=-" + - "--tls-cert-file=/var/serving-cert/tls.crt" + - "--tls-private-key-file=/var/serving-cert/tls.key" + - "--v=${LOGLEVEL}" + - "--config=/var/apiserver-config/apiserver-config.yaml" + ports: + - containerPort: 8443 + volumeMounts: + - mountPath: /var/serving-cert + name: serving-cert + - mountPath: /var/apiserver-config + name: apiserver-config + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + nodeSelector: "${{NODE_SELECTOR}}" + volumes: + - name: serving-cert + secret: + defaultMode: 420 + secretName: apiserver-serving-cert + - name: apiserver-config + configMap: + defaultMode: 420 + name: apiserver-config + +# to create the config for the TSB +- apiVersion: v1 + kind: ConfigMap + metadata: + namespace: ${NAMESPACE} + name: apiserver-config + data: + apiserver-config.yaml: 
${API_SERVER_CONFIG} + +# to be able to assign powers to the process +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: apiserver + +# to be able to expose TSB inside the cluster +- apiVersion: v1 + kind: Service + metadata: + namespace: ${NAMESPACE} + name: apiserver + annotations: + service.alpha.openshift.io/serving-cert-secret-name: apiserver-serving-cert + spec: + selector: + apiserver: "true" + ports: + - port: 443 + targetPort: 8443 + +# This service account will be granted permission to call the TSB. +# The token for this SA will be provided to the service catalog for +# use when calling the TSB. +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# This secret will be populated with a copy of the templateservicebroker-client SA's +# auth token. Since this secret has a static name, it can be referenced more +# easily than the auto-generated secret for the service account. +- apiVersion: v1 + kind: Secret + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + annotations: + kubernetes.io/service-account.name: templateservicebroker-client + type: kubernetes.io/service-account-token diff --git a/roles/template_service_broker/files/rbac-template.yaml b/roles/template_service_broker/files/rbac-template.yaml new file mode 100644 index 000000000..0937a9065 --- /dev/null +++ b/roles/template_service_broker/files/rbac-template.yaml @@ -0,0 +1,92 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-rbac +parameters: +- name: NAMESPACE + value: openshift-template-service-broker +- name: KUBE_SYSTEM + value: kube-system +objects: + +# Grant the service account permission to call the TSB +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: templateservicebroker-client + roleRef: + kind: ClusterRole + name: system:openshift:templateservicebroker-client + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# to delegate authentication and authorization +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: auth-delegator-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:auth-delegator + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to have the template service broker powers +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: tsb-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:openshift:controller:template-service-broker + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to read the config for terminating authentication +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${KUBE_SYSTEM} + name: extension-apiserver-authentication-reader-${NAMESPACE} + roleRef: + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# allow the kube service catalog's SA to read the static secret defined +# above, which will contain the token for the SA that can call the TSB. 
+- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + name: templateservicebroker-auth-reader + namespace: ${NAMESPACE} + rules: + - apiGroups: + - "" + resourceNames: + - templateservicebroker-client + resources: + - secrets + verbs: + - get +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-auth-reader + roleRef: + kind: Role + name: templateservicebroker-auth-reader + subjects: + - kind: ServiceAccount + namespace: kube-service-catalog + name: service-catalog-controller diff --git a/roles/template_service_broker/files/template-service-broker-registration.yaml b/roles/template_service_broker/files/template-service-broker-registration.yaml new file mode 100644 index 000000000..95fb72924 --- /dev/null +++ b/roles/template_service_broker/files/template-service-broker-registration.yaml @@ -0,0 +1,25 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-registration +parameters: +- name: TSB_NAMESPACE + value: openshift-template-service-broker +- name: CA_BUNDLE + required: true +objects: +# register the tsb with the service catalog +- apiVersion: servicecatalog.k8s.io/v1beta1 + kind: ClusterServiceBroker + metadata: + name: template-service-broker + spec: + url: https://apiserver.${TSB_NAMESPACE}.svc:443/brokers/template.openshift.io + insecureSkipTLSVerify: false + caBundle: ${CA_BUNDLE} + authInfo: + bearer: + secretRef: + kind: Secret + name: templateservicebroker-client + namespace: ${TSB_NAMESPACE} diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 604e94602..d0a07c48d 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -22,8 +22,13 @@ register: mktemp changed_when: False +- name: Copy admin client config + command: > + cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: false + - copy: - src: "{{ __tsb_files_location }}/{{ item }}" + src: "{{ item }}" dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - "{{ __tsb_template_file }}" @@ -43,16 +48,18 @@ - name: Apply template file shell: > - {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig + -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" --param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }} - | {{ openshift_client_binary }} apply -f - + | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f - # reconcile with rbac - name: Reconcile with RBAC file shell: > - {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f - + {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" + | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f - # Check that the TSB is running - name: Verify that TSB is running @@ -79,9 +86,15 @@ # Register with broker - name: Register TSB with broker shell: > - {{ openshift_client_binary }} process -f "{{ 
mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f - + {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f - - file: state: absent name: "{{ mktemp.stdout }}" changed_when: False + +- name: Rollout console so it discovers the template service broker is installed + include_role: + name: openshift_web_console + tasks_from: rollout_console.yml + when: openshift_web_console_install | default(true) | bool diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index db1b558e4..b46dd4771 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -3,8 +3,13 @@ register: mktemp changed_when: False +- name: Copy admin client config + command: > + cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: false + - copy: - src: "{{ __tsb_files_location }}/{{ item }}" + src: "{{ item }}" dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - "{{ __tsb_template_file }}" @@ -12,11 +17,11 @@ - name: Delete TSB broker shell: > - {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f - + {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f - - name: Delete TSB objects shell: > - {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f - + {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f - - name: empty out tech preview extension file for service console UI copy: @@ -31,3 +36,9 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False + +- name: Rollout console so it discovers the template service broker is removed + include_role: + name: openshift_web_console + tasks_from: rollout_console.yml + when: openshift_web_console_install | default(true) | bool diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml index 662d65d9f..dc164a4db 100644 --- a/roles/template_service_broker/vars/default_images.yml +++ b/roles/template_service_broker/vars/default_images.yml @@ -1,4 +1,4 @@ --- -__template_service_broker_prefix: "docker.io/openshift/" +__template_service_broker_prefix: "docker.io/openshift/origin-" __template_service_broker_version: "latest" -__template_service_broker_image_name: "origin-template-service-broker" +__template_service_broker_image_name: "template-service-broker" diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml index a65340f16..7dec24a79 100644 --- a/roles/template_service_broker/vars/main.yml +++ b/roles/template_service_broker/vars/main.yml @@ -1,6 +1,4 @@ --- -__tsb_files_location: "../../../files/origin-components/" - __tsb_template_file: 
"apiserver-template.yaml" __tsb_config_file: "apiserver-config.yaml" __tsb_rbac_file: "rbac-template.yaml" diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml index 16a08e72f..b65b97691 100644 --- a/roles/template_service_broker/vars/openshift-enterprise.yml +++ b/roles/template_service_broker/vars/openshift-enterprise.yml @@ -1,4 +1,4 @@ --- -__template_service_broker_prefix: "registry.access.redhat.com/openshift3/" +__template_service_broker_prefix: "registry.access.redhat.com/openshift3/ose-" __template_service_broker_version: "v3.7" -__template_service_broker_image_name: "ose-template-service-broker" +__template_service_broker_image_name: "template-service-broker" diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml index 4a28d47b2..5129f4471 100644 --- a/roles/tuned/tasks/main.yml +++ b/roles/tuned/tasks/main.yml @@ -28,7 +28,12 @@ when: item.state == 'file' - name: Make tuned use the recommended tuned profile on restart - file: path=/etc/tuned/active_profile state=absent + file: + path: '{{ item }}' + state: absent + with_items: + - /etc/tuned/active_profile + - /etc/tuned/profile_mode - name: Restart tuned service systemd: |