Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 26
-rw-r--r--  roles/docker/defaults/main.yml | 1
-rw-r--r--  roles/docker/tasks/package_docker.yml | 11
-rw-r--r--  roles/kuryr/README.md | 38
-rw-r--r--  roles/kuryr/defaults/main.yaml | 72
-rw-r--r--  roles/kuryr/meta/main.yml | 17
-rw-r--r--  roles/kuryr/tasks/master.yaml | 52
-rw-r--r--  roles/kuryr/tasks/node.yaml | 48
-rw-r--r--  roles/kuryr/tasks/serviceaccount.yaml | 31
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 53
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 343
-rw-r--r--  roles/kuryr/templates/controller-deployment.yaml.j2 | 40
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py | 16
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_csr.py | 16
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 22
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 23
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 7
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 26
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 77
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 2
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 178
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/default_images.yml | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml | 3
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 4
-rw-r--r--  roles/openshift_master/defaults/main.yml | 88
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/bootstrap.yml | 63
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 8
-rw-r--r--  roles/openshift_node/files/bootstrap.yml | 63
-rw-r--r--  roles/openshift_node/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node/tasks/aws.yml | 21
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 55
-rw-r--r--  roles/openshift_node/tasks/config.yml | 64
-rw-r--r--  roles/openshift_node/tasks/install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 9
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 34
-rw-r--r--  roles/openshift_prometheus/files/openshift_prometheus.exports | 3
-rw-r--r--  roles/openshift_prometheus/tasks/create_pvs.yaml | 36
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 9
-rw-r--r--  roles/openshift_prometheus/tasks/nfs.yaml | 44
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-server.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prometheus_deployment.j2 | 2
-rw-r--r--  roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 110
-rw-r--r--  roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml | 16
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 17
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 8
-rw-r--r--  roles/openshift_service_catalog/tasks/remove.yml | 4
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 4
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 19
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 3
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 3
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 4
62 files changed, 1466 insertions, 421 deletions
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 0f4b71124..9a91927b8 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -30,8 +30,12 @@
ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
+- set_fact:
+ openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+ when: openshift_master_config_dir is undefined
+
- slurp:
- src: "{{ ansible_service_broker_certs_dir }}/ca.crt"
+ src: "{{ openshift_master_config_dir }}/service-signer.crt"
register: catalog_ca
@@ -231,6 +235,20 @@
value: /etc/ansible-service-broker/config.yaml
resources: {}
terminationMessagePath: /tmp/termination-log
+ readinessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ livenessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
- image: "{{ ansible_service_broker_etcd_image }}"
name: etcd
@@ -327,12 +345,12 @@
oc_obj:
name: ansible-service-broker
state: present
- kind: ServiceBroker
+ kind: ClusterServiceBroker
content:
path: /tmp/brokerout
data:
- apiVersion: servicecatalog.k8s.io/v1alpha1
- kind: ServiceBroker
+ apiVersion: servicecatalog.k8s.io/v1beta1
+ kind: ClusterServiceBroker
metadata:
name: ansible-service-broker
spec:
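
For reference, a minimal sketch of the object this change now registers — the spec values below are placeholders; the role fills the URL from the broker service and the caBundle from the service-signer CA slurped above:

    apiVersion: servicecatalog.k8s.io/v1beta1
    kind: ClusterServiceBroker
    metadata:
      name: ansible-service-broker
    spec:
      url: https://asb.example.svc:1338/ansible-service-broker  # placeholder
      caBundle: <base64 of service-signer.crt>                  # placeholder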
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index e36dfa7b9..1c830cb4e 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,5 +1,6 @@
---
docker_cli_auth_config_path: '/root/.docker'
+openshift_docker_signature_verification: False
# oreg_url is defined by user input.
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 888ae40e7..7ccab37a5 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -115,11 +115,12 @@
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
line: "OPTIONS='\
- {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %}\
- {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
- {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
- {% if docker_options is defined %} {{ docker_options }}{% endif %}\
- {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
+ {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
+ {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %} \
+ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+ {% if docker_options is defined %} {{ docker_options }}{% endif %} \
+ {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \
+ --signature-verification={{ openshift_docker_signature_verification | bool }}'"
when: docker_check.stat.isreg is defined and docker_check.stat.isreg
notify:
- restart docker
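
For illustration, on an SELinux-enabled host with no extra docker options set, the rewritten task would render roughly the following into /etc/sysconfig/docker (a sketch; exact whitespace follows the Jinja conditionals above, and openshift_docker_signature_verification defaults to False):

    OPTIONS=' --selinux-enabled  --signature-verification=False'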
diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md
new file mode 100644
index 000000000..7b618f902
--- /dev/null
+++ b/roles/kuryr/README.md
@@ -0,0 +1,38 @@
+## OpenStack Kuryr
+
+Installs the Kuryr CNI components (kuryr-controller, kuryr-cni) on master and
+worker nodes. Kuryr uses the OpenStack Networking service (Neutron) to provide
+networking for pods, enabling interconnectivity between pods and OpenStack VMs.
+
+## Requirements
+
+* Ansible 2.2+
+* CentOS/RHEL 7.3+
+
+## Current Kuryr restrictions when used with OpenShift
+
+* OpenShift Origin only
+* OpenShift on OpenStack Newton or newer (only with Trunk ports)
+
+## Key Ansible inventory Kuryr master configuration parameters
+
+* ``openshift_use_kuryr=True``
+* ``openshift_use_openshift_sdn=False``
+* ``openshift_sdn_network_plugin_name='cni'``
+* ``kuryr_cni_link_interface=eth0``
+* ``kuryr_openstack_auth_url=keystone_url``
+* ``kuryr_openstack_user_domain_name=Default``
+* ``kuryr_openstack_user_project_name=Default``
+* ``kuryr_openstack_project_id=project_uuid``
+* ``kuryr_openstack_username=kuryr``
+* ``kuryr_openstack_password=kuryr_pass``
+* ``kuryr_openstack_pod_sg_id=pod_security_group_uuid``
+* ``kuryr_openstack_pod_subnet_id=pod_subnet_uuid``
+* ``kuryr_openstack_service_subnet_id=service_subnet_uuid``
+* ``kuryr_openstack_pod_project_id=pod_project_uuid``
+* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid``
+
+## Kuryr resources
+
+* [Kuryr documentation](https://docs.openstack.org/kuryr-kubernetes/latest/)
+* [Installing Kuryr containerized](https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html)
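
A minimal sketch of the parameters listed in this README as inventory group_vars, using the variable names the role's templates actually consume — every value below is a placeholder to adapt to the target OpenStack cloud:

    openshift_use_kuryr: True
    openshift_use_openshift_sdn: False
    openshift_sdn_network_plugin_name: cni
    kuryr_cni_link_interface: eth0
    kuryr_openstack_auth_url: http://keystone.example.com:5000/v3
    kuryr_openstack_user_domain_name: Default
    kuryr_openstack_project_domain_name: Default
    kuryr_openstack_project_id: 1234567890abcdef1234567890abcdef
    kuryr_openstack_username: kuryr
    kuryr_openstack_password: kuryr_pass
    kuryr_openstack_pod_sg_id: 00000000-0000-0000-0000-000000000001
    kuryr_openstack_pod_subnet_id: 00000000-0000-0000-0000-000000000002
    kuryr_openstack_service_subnet_id: 00000000-0000-0000-0000-000000000003
    kuryr_openstack_pod_project_id: 1234567890abcdef1234567890abcdef
    kuryr_openstack_worker_nodes_subnet_id: 00000000-0000-0000-0000-000000000004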
diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml
new file mode 100644
index 000000000..ff298dda0
--- /dev/null
+++ b/roles/kuryr/defaults/main.yaml
@@ -0,0 +1,72 @@
+---
+# Kuryr conf directory
+kuryr_config_dir: /etc/kuryr
+
+# Kuryr username
+kuryr_openstack_username: kuryr
+
+# Kuryr user domain name
+kuryr_openstack_user_domain_name: default
+
+# Kuryr project domain name
+kuryr_openstack_project_domain_name: default
+
+# Kuryr OpenShift namespace
+kuryr_namespace: kube-system
+
+# Whether to run the cni plugin in debug mode
+kuryr_cni_debug: "false"
+
+# The version of cni binaries
+cni_version: v0.5.2
+
+# Path to bin dir (where kuryr execs get installed)
+bin_dir: /usr/bin
+
+# Path to the cni binaries
+cni_bin_dir: /opt/cni/bin
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"
+cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9"
+
+# Kuryr ClusterRole definition
+kuryr_clusterrole:
+ name: kuryrctl
+ state: present
+ rules:
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - get
+ - list
+ - watch
+ resources:
+ - daemonsets
+ - deployments
+ - deploymentconfigs
+ - endpoints
+ - ingress
+ - nodes
+ - namespaces
+ - pods
+ - projects
+ - routes
+ - services
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - update
+ - patch
+ resources:
+ - endpoints
+ - ingress
+ - pods
+ - namespaces
+ - nodes
+ - services
+ - services/status
+ - routes
diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml
new file mode 100644
index 000000000..7fd5adf41
--- /dev/null
+++ b/roles/kuryr/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Red Hat
+ description: Kuryr networking
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
new file mode 100644
index 000000000..55ab16f74
--- /dev/null
+++ b/roles/kuryr/tasks/master.yaml
@@ -0,0 +1,52 @@
+---
+- name: Perform OpenShift ServiceAccount config
+ include: serviceaccount.yaml
+
+- name: Create kuryr manifests tempdir
+ command: mktemp -d
+ register: manifests_tmpdir
+
+- name: Create kuryr ConfigMap manifest
+ become: yes
+ template:
+ src: configmap.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Create kuryr-controller Deployment manifest
+ become: yes
+ template:
+ src: controller-deployment.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Create kuryr-cni DaemonSet manifest
+ become: yes
+ template:
+ src: cni-daemonset.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+
+- name: Apply ConfigMap manifest
+ oc_obj:
+ state: present
+ kind: ConfigMap
+ name: "kuryr-config"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Apply Controller Deployment manifest
+ oc_obj:
+ state: present
+ kind: Deployment
+ name: "kuryr-controller"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Apply kuryr-cni DaemonSet manifest
+ oc_obj:
+ state: present
+ kind: DaemonSet
+ name: "kuryr-cni-ds"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
new file mode 100644
index 000000000..ffe814713
--- /dev/null
+++ b/roles/kuryr/tasks/node.yaml
@@ -0,0 +1,48 @@
+---
+- name: Create CNI bin directory
+ file:
+ state: directory
+ path: "{{ cni_bin_dir }}"
+ mode: 0755
+ owner: root
+ group: root
+ recurse: yes
+
+- name: Create CNI extraction tempdir
+ command: mktemp -d
+ register: cni_tmpdir
+
+- name: Download CNI
+ get_url:
+ url: "{{ cni_bin_url }}"
+ checksum: "sha1:{{ cni_bin_checksum }}"
+ mode: 0644
+ dest: "{{ cni_tmpdir.stdout }}"
+ register: downloaded_tarball
+
+- name: Extract CNI
+ become: yes
+ unarchive:
+ remote_src: True
+ src: "{{ downloaded_tarball.dest }}"
+ dest: "{{ cni_bin_dir }}"
+ when: downloaded_tarball.changed
+
+- name: Ensure CNI net.d exists
+ file:
+ path: /etc/cni/net.d
+ recurse: yes
+ state: directory
+
+- name: Configure OpenShift node with disabled service proxy
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ regexp: '^OPTIONS="?(.*?)"?$'
+ backrefs: yes
+ backup: yes
+ line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+
+- name: force node restart to disable the proxy
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
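
For illustration, if the node's sysconfig previously contained OPTIONS="--loglevel=2", the backreference rewrite above yields:

    OPTIONS="--loglevel=2 --disable dns,proxy,plugins"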
diff --git a/roles/kuryr/tasks/serviceaccount.yaml b/roles/kuryr/tasks/serviceaccount.yaml
new file mode 100644
index 000000000..088f13091
--- /dev/null
+++ b/roles/kuryr/tasks/serviceaccount.yaml
@@ -0,0 +1,31 @@
+---
+- name: Create Controller service account
+ oc_serviceaccount:
+ name: kuryr-controller
+ namespace: "{{ kuryr_namespace }}"
+ register: saout
+
+- name: Create a cluster role for Kuryr
+ oc_clusterrole: "{{ kuryr_clusterrole }}"
+
+- name: Fetch the created Kuryr controller cluster role
+ oc_clusterrole:
+ name: kuryrctl
+ state: list
+ register: crout
+
+- name: Grant Kuryr the privileged security context constraints
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+
+- name: Assign role to Kuryr service account
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: cluster-role
+ resource_name: "{{ crout.results.results.metadata.name }}"
+ state: present
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
new file mode 100644
index 000000000..39348ae90
--- /dev/null
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -0,0 +1,53 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kuryr-cni-ds
+ namespace: {{ kuryr_namespace }}
+ labels:
+ tier: node
+ app: kuryr
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: kuryr
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: kuryr-controller
+ containers:
+ - name: kuryr-cni
+ image: kuryr/cni:latest
+ imagePullPolicy: IfNotPresent
+ command: [ "cni_ds_init" ]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: bin
+ mountPath: /opt/cni/bin
+ - name: net-conf
+ mountPath: /etc/cni/net.d
+ - name: config-volume
+ mountPath: /tmp/kuryr/kuryr.conf
+ subPath: kuryr-cni.conf
+ - name: etc
+ mountPath: /etc
+ volumes:
+ - name: bin
+ hostPath:
+ path: {{ cni_bin_dir }}
+ - name: net-conf
+ hostPath:
+ path: /etc/cni/net.d
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ - name: etc
+ hostPath:
+      path: /etc
\ No newline at end of file
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
new file mode 100644
index 000000000..e874d6c25
--- /dev/null
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -0,0 +1,343 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kuryr-config
+ namespace: {{ kuryr_namespace }}
+data:
+ kuryr.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # DEPRECATED: If set to false, the logging level will be set to WARNING instead
+ # of the default INFO level. (boolean value)
+ # This option is deprecated for removal.
+ # Its value may be silently ignored in the future.
+ #verbose = true
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/kuryr-controller.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ #use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = eth0
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+    username = {{ kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+ kuryr-cni.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/cni.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = {{ kuryr_cni_link_interface }}
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # The token to talk to the k8s API
+ token_file = /etc/kuryr/token
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /etc/kuryr/ca.crt
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2
new file mode 100644
index 000000000..d970270b5
--- /dev/null
+++ b/roles/kuryr/templates/controller-deployment.yaml.j2
@@ -0,0 +1,40 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ namespace: {{ kuryr_namespace }}
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ spec:
+ serviceAccountName: kuryr-controller
+ automountServiceAccountToken: true
+ hostNetwork: true
+ containers:
+ - image: kuryr/controller:latest
+ imagePullPolicy: IfNotPresent
+ name: controller
+ terminationMessagePath: "/dev/termination-log"
+ # FIXME(dulek): This shouldn't be required, but without it selinux is
+ # complaining about access to kuryr.conf.
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: "/etc/kuryr/kuryr.conf"
+ subPath: kuryr.conf
+ volumes:
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ defaultMode: 0666
+ restartPolicy: Always
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index d1dc4caf8..324f52689 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -1478,11 +1478,23 @@ class OCcsr(OpenShiftCLI):
return False
+ def get_csr_request(self, request):
+ '''base64 decode the request object and call openssl to determine the
+ subject and specifically the CN: from the request
+
+ Output:
+ (0, '...
+ Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal
+ ...')
+ '''
+ import base64
+ return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1]
+
def match_node(self, csr):
'''match an incoming csr to a node in self.nodes'''
for node in self.nodes:
- # we have a match
- if node['name'] in csr['metadata']['name']:
+ # we need to match based upon the csr's request certificate's CN
+ if node['name'] in self.get_csr_request(csr['spec']['request']):
node['csrs'][csr['metadata']['name']] = csr
# check that the username is the node and type is 'Approved'
diff --git a/roles/lib_openshift/src/class/oc_adm_csr.py b/roles/lib_openshift/src/class/oc_adm_csr.py
index ea11c6ca9..22b8f9165 100644
--- a/roles/lib_openshift/src/class/oc_adm_csr.py
+++ b/roles/lib_openshift/src/class/oc_adm_csr.py
@@ -66,11 +66,23 @@ class OCcsr(OpenShiftCLI):
return False
+ def get_csr_request(self, request):
+ '''base64 decode the request object and call openssl to determine the
+ subject and specifically the CN: from the request
+
+ Output:
+ (0, '...
+ Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal
+ ...')
+ '''
+ import base64
+ return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1]
+
def match_node(self, csr):
'''match an incoming csr to a node in self.nodes'''
for node in self.nodes:
- # we have a match
- if node['name'] in csr['metadata']['name']:
+ # we need to match based upon the csr's request certificate's CN
+ if node['name'] in self.get_csr_request(csr['spec']['request']):
node['csrs'][csr['metadata']['name']] = csr
# check that the username is the node and type is 'Approved'
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index ea09857b0..5371588cf 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -4,7 +4,6 @@ openshift_aws_create_iam_cert: True
openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_kubernetes_cluster_status: owned # or shared
openshift_aws_node_group_type: master
openshift_aws_wait_for_ssh: True
@@ -13,6 +12,7 @@ openshift_aws_clusterid: default
openshift_aws_region: us-east-1
openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
+openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
@@ -89,6 +89,10 @@ openshift_aws_node_group_config_node_volumes:
delete_on_termination: True
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+openshift_aws_node_group_termination_policy: Default
+openshift_aws_node_group_replace_instances: []
+openshift_aws_node_group_replace_all_instances: False
+openshift_aws_node_group_config_extra_labels: {}
openshift_aws_node_group_config:
tags: "{{ openshift_aws_node_group_config_tags }}"
@@ -105,7 +109,11 @@ openshift_aws_node_group_config:
tags:
host-type: master
sub-host-type: default
+ labels:
+ type: master
wait_for_instances: True
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
compute:
instance_type: m4.xlarge
ami: "{{ openshift_aws_ami }}"
@@ -119,6 +127,10 @@ openshift_aws_node_group_config:
tags:
host-type: node
sub-host-type: compute
+ labels:
+ type: compute
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
infra:
instance_type: m4.xlarge
ami: "{{ openshift_aws_ami }}"
@@ -132,6 +144,10 @@ openshift_aws_node_group_config:
tags:
host-type: node
sub-host-type: infra
+ labels:
+ type: infra
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
openshift_aws_elb_security_groups:
- "{{ openshift_aws_clusterid }}"
@@ -211,3 +227,7 @@ openshift_aws_vpc:
az: "us-east-1e"
- cidr: 172.31.16.0/20
az: "us-east-1a"
+
+openshift_aws_node_run_bootstrap_startup: True
+openshift_aws_node_user_data: ''
+openshift_aws_node_config_namespace: openshift-node
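
The new scale-group knobs default to non-destructive values; a sketch of opting into instance replacement from the inventory (values illustrative — OldestLaunchConfiguration is one of the standard AWS auto-scaling termination policies):

    openshift_aws_node_group_termination_policy: OldestLaunchConfiguration
    openshift_aws_node_group_replace_all_instances: True
    # or replace only specific instances:
    # openshift_aws_node_group_replace_instances:
    # - i-0123456789abcdef0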
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index e6be9969c..8b7b02a0e 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -4,6 +4,11 @@
when:
- openshift_aws_ami is undefined
+- fail:
+ msg: "Ensure that openshift_deployment_type is defined."
+ when:
+ - openshift_deployment_type is undefined
+
- name: query vpc
ec2_vpc_net_facts:
region: "{{ openshift_aws_region }}"
@@ -27,23 +32,7 @@
image_id: "{{ openshift_aws_ami }}"
instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- user_data: |-
- #cloud-config
- {% if openshift_aws_node_group_type != 'master' %}
- write_files:
- - path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
- - path: /root/openshift_settings
- owner: root:root
- permissions: '0640'
- content:
- openshift_type: "{{ openshift_aws_node_group_type }}"
- runcmd:
- - [ systemctl, enable, atomic-openshift-node]
- - [ systemctl, start, atomic-openshift-node]
- {% endif %}
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
key_name: "{{ openshift_aws_ssh_key_name }}"
ebs_optimized: False
volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 1384bae59..25ae6ce1c 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -1,4 +1,8 @@
---
+- name: set openshift_node_bootstrap to True when building AMI
+ set_fact:
+ openshift_node_bootstrap: True
+
- name: query vpc
ec2_vpc_net_facts:
region: "{{ openshift_aws_region }}"
@@ -53,10 +57,6 @@
timeout: 300
search_regex: OpenSSH
-- name: Pause 10 seconds to ensure ssh actually accepts logins
- pause:
- seconds: 20
-
- name: add host to nodes
add_host:
groups: nodes
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 3e969fc43..eb31636e7 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -28,5 +28,7 @@
load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+ replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (openshift_aws_node_group_config[openshift_aws_node_group_type].replace_all_instances | default(omit)) }}"
tags:
- "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 0cb749dcc..d319fdd1a 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,4 +1,11 @@
---
+- name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
+
- name: fetch newly created instances
ec2_remote_facts:
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
new file mode 100644
index 000000000..ed9c0ed0b
--- /dev/null
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -0,0 +1,26 @@
+{% if openshift_aws_node_user_data is defined and openshift_aws_node_user_data != '' %}
+{{ openshift_aws_node_user_data }}
+{% else %}
+#cloud-config
+write_files:
+- path: /root/openshift_bootstrap/openshift_settings.yaml
+ owner: 'root:root'
+ permissions: '0640'
+ content: |
+ openshift_group_type: {{ openshift_aws_node_group_type }}
+{% if openshift_aws_node_group_type != 'master' %}
+- path: /etc/origin/node/csr_kubeconfig
+ owner: 'root:root'
+ permissions: '0640'
+ encoding: b64
+ content: {{ openshift_aws_launch_config_bootstrap_token | b64encode }}
+{% endif %}
+runcmd:
+{% if openshift_aws_node_run_bootstrap_startup %}
+- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
+{% endif %}
+{% if openshift_aws_node_group_type != 'master' %}
+- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
+- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
+{% endif %}
+{% endif %}
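
For illustration, a compute node group on Origin with openshift_aws_node_run_bootstrap_startup left at True would render user data roughly like this (the kubeconfig content is a placeholder):

    #cloud-config
    write_files:
    - path: /root/openshift_bootstrap/openshift_settings.yaml
      owner: 'root:root'
      permissions: '0640'
      content: |
        openshift_group_type: compute
    - path: /etc/origin/node/csr_kubeconfig
      owner: 'root:root'
      permissions: '0640'
      encoding: b64
      content: <base64-encoded bootstrap kubeconfig>
    runcmd:
    - [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
    - [ systemctl, enable, origin-node]
    - [ systemctl, start, origin-node]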
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ba1d8f29d..33028fea4 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -498,6 +498,20 @@ def set_selectors(facts):
facts['hosted']['etcd'] = {}
if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
facts['hosted']['etcd']['selector'] = None
+ if 'prometheus' not in facts:
+ facts['prometheus'] = {}
+ if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
+ facts['prometheus']['selector'] = None
+ if 'alertmanager' not in facts['prometheus']:
+ facts['prometheus']['alertmanager'] = {}
+ # pylint: disable=line-too-long
+ if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
+ facts['prometheus']['alertmanager']['selector'] = None
+ if 'alertbuffer' not in facts['prometheus']:
+ facts['prometheus']['alertbuffer'] = {}
+ # pylint: disable=line-too-long
+ if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
+ facts['prometheus']['alertbuffer']['selector'] = None
return facts
@@ -1779,7 +1793,8 @@ class OpenShiftFacts(object):
'node',
'logging',
'loggingops',
- 'metrics']
+ 'metrics',
+ 'prometheus']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
@@ -2068,6 +2083,66 @@ class OpenShiftFacts(object):
)
)
+ defaults['prometheus'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['prometheus']['alertmanager'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus-alertmanager',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['prometheus']['alertbuffer'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus-alertbuffer',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
return defaults
def guess_host_provider(self):
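
Since openshift_hosted_facts (below) now feeds 'prometheus' through the same oo_openshift_env mapping as the other hosted components, inventory variables of the following shape should land in these facts — a sketch, assuming the established openshift_<component>_storage_* naming convention:

    openshift_prometheus_storage_kind: nfs
    openshift_prometheus_storage_nfs_directory: /exports
    openshift_prometheus_storage_volume_name: prometheus
    openshift_prometheus_storage_volume_size: 10Gi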
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index d72a11de1..64c7cd019 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -313,7 +313,7 @@ fi
# wait until all node groups are stable
{% for node_group in openshift_gcp_node_group_config %}
# wait for stable {{ node_group.name }}
-( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) &
+( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
{% endfor %}
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
index 47dc9171d..8fc70cecb 100644
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -16,4 +16,4 @@
| oo_openshift_env }}"
openshift_env_structures:
- 'openshift.hosted.router.*'
- with_items: [hosted, logging, loggingops, metrics]
+ with_items: [hosted, logging, loggingops, metrics, prometheus]
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index 554aa5bb2..fc48b7f71 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -40,8 +40,6 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_
# config the es plugin to write kibana index based on the index mode
openshift_logging_elasticsearch_kibana_index_mode: 'unique'
-openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy"
-openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
openshift_logging_elasticsearch_proxy_cpu_limit: "100m"
openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"
openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index df2c17aa0..aeff2d198 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -17,6 +17,17 @@
- include: determine_version.yaml
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set elasticsearch_prefix image facts
+ set_fact:
+ openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_elasticsearch_proxy_image_prefix | default(__openshift_logging_elasticsearch_proxy_image_prefix) }}"
+ openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_elasticsearch_proxy_image_version | default(__openshift_logging_elasticsearch_proxy_image_version) }}"
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
@@ -52,7 +63,7 @@
name: "aggregated-logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# rolebinding reader
- copy:
@@ -66,7 +77,7 @@
kind: clusterrole
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/rolebinding-reader.yml"
+ - "{{ tempdir }}/rolebinding-reader.yml"
delete_after: true
# SA roles
@@ -107,8 +118,8 @@
- fail:
msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"
when:
- - "prometheus_out.stderr | length > 0"
- - "'already exists' not in prometheus_out.stderr"
+ - "prometheus_out.stderr | length > 0"
+ - "'already exists' not in prometheus_out.stderr"
# View role and binding
- name: Generate logging-elasticsearch-view-role
@@ -120,8 +131,8 @@
roleRef:
name: view
subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
changed_when: no
- name: Set logging-elasticsearch-view-role role
@@ -131,18 +142,18 @@
kind: rolebinding
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
delete_after: true
# configmap
- assert:
that:
- - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
+ - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"
- assert:
that:
- - "{{ openshift_logging_es_log_appenders | length > 0 }}"
+ - "{{ openshift_logging_es_log_appenders | length > 0 }}"
msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"
- template:
@@ -194,22 +205,22 @@
name: "logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - name: key
- path: "{{ generated_certs_dir }}/logging-es.jks"
- - name: truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: searchguard.key
- path: "{{ generated_certs_dir }}/elasticsearch.jks"
- - name: searchguard.truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: admin-key
- path: "{{ generated_certs_dir }}/system.admin.key"
- - name: admin-cert
- path: "{{ generated_certs_dir }}/system.admin.crt"
- - name: admin-ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: admin.jks
- path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -223,7 +234,7 @@
labels:
logging-infra: 'support'
ports:
- - port: 9300
+ - port: 9300
- name: Set logging-{{ es_component }} service
oc_service:
@@ -236,8 +247,8 @@
labels:
logging-infra: 'support'
ports:
- - port: 9200
- targetPort: "restapi"
+ - port: 9200
+ targetPort: "restapi"
- name: Set logging-{{ es_component}}-prometheus service
oc_service:
@@ -247,9 +258,9 @@
labels:
logging-infra: 'support'
ports:
- - name: proxy
- port: 443
- targetPort: 4443
+ - name: proxy
+ port: 443
+ targetPort: 4443
selector:
component: "{{ es_component }}-prometheus"
provider: openshift
@@ -277,46 +288,46 @@
# so we check for the presence of 'stderr' to determine if the obj exists or not
# the RC for existing and not existing is both 0
- when:
- - logging_elasticsearch_pvc.results.stderr is defined
- - openshift_logging_elasticsearch_storage_type == "pvc"
+ - logging_elasticsearch_pvc.results.stderr is defined
+ - openshift_logging_elasticsearch_storage_type == "pvc"
block:
- # storageclasses are used by default but if static then disable
- # storageclasses with the storageClassName set to "" in pvc.j2
- - name: Creating ES storage template - static
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
- when:
- - not openshift_logging_elasticsearch_pvc_dynamic | bool
-
- # Storageclasses are used by default if configured
- - name: Creating ES storage template - dynamic
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- when:
- - openshift_logging_elasticsearch_pvc_dynamic | bool
-
- - name: Set ES storage
- oc_obj:
- state: present
- kind: pvc
- name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- files:
- - "{{ tempdir }}/templates/logging-es-pvc.yml"
- delete_after: true
+ # storageclasses are used by default but if static then disable
+ # storageclasses with the storageClassName set to "" in pvc.j2
+ - name: Creating ES storage template - static
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
+ when:
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ # Storageclasses are used by default if configured
+ - name: Creating ES storage template - dynamic
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ - name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
- set_fact:
es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
@@ -337,6 +348,7 @@
logging_component: elasticsearch
deploy_name: "{{ es_deploy_name }}"
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
+ proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
@@ -352,7 +364,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-es-dc.yml"
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
delete_after: true
- name: Retrieving the cert to use when generating secrets for the {{ es_component }} component
@@ -360,37 +372,37 @@
src: "{{ generated_certs_dir }}/{{ item.file }}"
register: key_pairs
with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "es_key", file: "system.logging.es.key" }
- - { name: "es_cert", file: "system.logging.es.crt" }
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key" }
+ - { name: "es_cert", file: "system.logging.es.crt" }
when: openshift_logging_es_allow_external | bool
- set_fact:
es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"
when:
- - openshift_logging_es_key | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}"
when:
- - openshift_logging_es_cert | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}"
when:
- - openshift_logging_es_ca_ext | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
when:
- - es_ca is not defined
- - openshift_logging_es_allow_external | bool
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
changed_when: false
- name: Generating Elasticsearch {{ es_component }} route template
@@ -421,7 +433,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: route
files:
- - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
+ - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
when: openshift_logging_es_allow_external | bool
## Placeholder for migration when necessary ##
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 1ed886627..ce3b2eb83 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -40,7 +40,7 @@ spec:
{% endif %}
containers:
- name: proxy
- image: {{openshift_logging_elasticsearch_proxy_image_prefix}}:{{openshift_logging_elasticsearch_proxy_image_version}}
+ image: {{ proxy_image }}
imagePullPolicy: Always
args:
- --upstream-ca=/etc/elasticsearch/secret/admin-ca
@@ -86,7 +86,7 @@ spec:
requests:
memory: "{{es_memory_limit}}"
{% if es_container_security_context %}
- securityContext: {{ es_container_security_context | to_yaml }}
+ securityContext: {{ es_container_security_context | to_yaml }}
{% endif %}
ports:
-
diff --git a/roles/openshift_logging_elasticsearch/vars/default_images.yml b/roles/openshift_logging_elasticsearch/vars/default_images.yml
new file mode 100644
index 000000000..b7d105caf
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/"
+__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..c87d48e27
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_elasticsearch_proxy_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_logging_elasticsearch_proxy_image_version: "v3.7"
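These double-underscore defaults follow the role's usual vars convention: a task elsewhere in the role presumably loads the deployment-specific file and falls back to default_images.yml, along the lines of the pattern the template_service_broker hunk below also uses (a sketch, not the literal task):

    - name: Set elasticsearch proxy image facts
      include_vars: "{{ item }}"
      with_first_found:
      - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
      - "default_images.yml"

so openshift-enterprise deployments pick up the registry.access.redhat.com prefix while everything else gets the docker.io default.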
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index 9ff4c7e80..ea1fd3efd 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -54,9 +54,9 @@ objects:
serviceAccount: aggregated-logging-eventrouter
serviceAccountName: aggregated-logging-eventrouter
{% if node_selector is iterable and node_selector | length > 0 %}
- nodeSelector:
+ nodeSelector:
{% for key, value in node_selector.iteritems() %}
- {{ key }}: "{{ value }}"
+ {{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
containers:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index f861a8e4d..3da861d03 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -46,6 +46,9 @@ r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}"
+
r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
@@ -54,3 +57,88 @@ r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_p
openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}"
+
+openshift_master_config_dir_default: "{{ (openshift.common.config_base | default('/etc/origin')) ~ '/master' }}"
+openshift_master_config_dir: "{{ openshift_master_config_dir_default }}"
+openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"
+
+openshift_master_node_config_networkconfig_mtu: 1450
+
+openshift_master_node_config_kubeletargs_cpu: 500m
+openshift_master_node_config_kubeletargs_mem: 512M
+
+openshift_master_bootstrap_enabled: False
+
+openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}"
+
+openshift_master_config_imageconfig_format: "{{ oreg_url if oreg_url != '' else 'registry.access.redhat.com/openshift3/ose-${component}:${version}' }}"
+
+# these are for the default settings in a generated node-config.yaml
+openshift_master_node_config_default_edits:
+- key: nodeName
+ state: absent
+- key: dnsBindAddress
+ value: 127.0.0.1:53
+- key: dnsDomain
+ value: cluster.local
+- key: dnsRecursiveResolvConf
+ value: /etc/origin/node/resolv.conf
+- key: imageConfig.format
+ value: "{{ openshift_master_config_imageconfig_format }}"
+- key: kubeletArguments.cloud-config
+ value:
+ - "/etc/origin/cloudprovider/{{ openshift_master_cloud_provider }}.conf"
+- key: kubeletArguments.cloud-provider
+ value:
+ - "{{ openshift_master_cloud_provider }}"
+- key: kubeletArguments.kube-reserved
+ value:
+ - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}"
+- key: kubeletArguments.system-reserved
+ value:
+ - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}"
+- key: enable-controller-attach-detach
+ value:
+ - 'true'
+- key: networkConfig.mtu
+ value: 8951
+- key: networkConfig.networkPluginName
+ value: "{{ r_openshift_master_sdn_network_plugin_name }}"
+- key: networkPluginName
+ value: "{{ r_openshift_master_sdn_network_plugin_name }}"
+
+
+# Labels applied to every node group
+openshift_master_node_config_kubeletargs_default_labels: []
+# Per-node-group label overrides
+openshift_master_node_config_kubeletargs_master_labels: []
+openshift_master_node_config_kubeletargs_infra_labels: []
+openshift_master_node_config_kubeletargs_compute_labels: []
+
+openshift_master_node_config_master:
+ type: master
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_master_labels) |
+ union(['type=master']) }}"
+openshift_master_node_config_infra:
+ type: infra
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_infra_labels) |
+ union(['type=infra']) }}"
+openshift_master_node_config_compute:
+ type: compute
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_compute_labels) |
+ union(['type=compute']) }}"
+
+openshift_master_node_configs:
+- "{{ openshift_master_node_config_infra }}"
+- "{{ openshift_master_node_config_compute }}"
+
+openshift_master_bootstrap_namespace: openshift-node
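With the label defaults left empty, the compute entry's kubeletArguments.node-labels union collapses to just ['type=compute']. An inventory that needs extra compute labels only has to set the override list, e.g. (hypothetical values):

    openshift_master_node_config_kubeletargs_compute_labels:
    - region=primary
    - zone=east

which the union filters merge into ['region=primary', 'zone=east', 'type=compute'] before the value lands in the generated node-config. Note that openshift_master_node_configs deliberately lists only the infra and compute configs; masters are not bootstrapped by default.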
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index a657668a9..a1cda2ad4 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -13,4 +13,5 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
- role: lib_os_firewall
diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml
index 0013f5289..eee89743c 100644
--- a/roles/openshift_master/tasks/bootstrap.yml
+++ b/roles/openshift_master/tasks/bootstrap.yml
@@ -26,3 +26,66 @@
copy:
content: "{{ kubeconfig_out.stdout }}"
dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig"
+
+- name: create a temp dir for this work
+ command: mktemp -d /tmp/openshift_node_config-XXXXXX
+ register: mktempout
+ run_once: true
+
+# Generating the node-config with the product itself means we do not
+# have to maintain our own copy of the template; the master produces
+# the settings, which the default edits below then adjust.
+- name: generate a node-config dynamically
+ command: >
+ {{ openshift_master_client_binary }} adm create-node-config
+ --node-dir={{ mktempout.stdout }}/
+ --node=CONFIGMAP
+ --hostnames=test
+ --certificate-authority={{ openshift_master_config_dir }}/ca.crt
+ --signer-cert={{ openshift_master_config_dir }}/ca.crt
+ --signer-key={{ openshift_master_config_dir }}/ca.key
+ --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt
+ --node-client-certificate-authority={{ openshift_master_config_dir }}/ca.crt
+ register: configgen
+ run_once: true
+
+- name: apply the default edits to the generated node-config
+ yedit:
+ state: "{{ item.state | default('present') }}"
+ src: "{{ mktempout.stdout }}/node-config.yaml"
+ key: "{{ item.key }}"
+ value: "{{ item.value | default(omit) }}"
+ with_items: "{{ openshift_master_node_config_default_edits }}"
+ run_once: true
+
+- name: copy the generated config into each group
+ copy:
+ src: "{{ mktempout.stdout }}/node-config.yaml"
+ remote_src: true
+ dest: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: "specialize the generated configs for node-config-{{ item.type }}"
+ yedit:
+ src: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ edits: "{{ item.edits }}"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: create node-config.yaml configmap
+ oc_configmap:
+ name: "node-config-{{ item.type }}"
+ namespace: "{{ openshift_master_bootstrap_namespace }}"
+ from_file:
+ node-config.yaml: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: remove templated files
+ file:
+ dest: "{{ mktempout.stdout }}/"
+ state: absent
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
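After this sequence runs, one configmap per entry in openshift_master_node_configs should exist in the bootstrap namespace. A quick sanity check could look like the following (an illustrative task, not part of the role):

    - name: list the generated bootstrap configmaps
      command: oc get configmaps -n openshift-node
      changed_when: false
      run_once: true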
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 7159ccc7f..40775571f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -179,7 +179,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 739b0d968..b9f16dfd4 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -4,7 +4,8 @@ openshift_node_debug_level: "{{ debug_level | default(2) }}"
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-openshift_service_type: "{{ openshift.common.service_type }}"
+openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
+openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
openshift_image_tag: ''
@@ -17,7 +18,6 @@ openshift_node_ami_prep_packages:
- openvswitch
- docker
- etcd
-#- pcs
- haproxy
- dnsmasq
- ntp
@@ -54,7 +54,6 @@ openshift_node_ami_prep_packages:
# - container-selinux
# - atomic
#
-openshift_deployment_type: origin
openshift_node_bootstrap: False
@@ -105,6 +104,9 @@ openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
+
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml
new file mode 100644
index 000000000..ea280640f
--- /dev/null
+++ b/roles/openshift_node/files/bootstrap.yml
@@ -0,0 +1,63 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+ gather_facts: yes
+ vars:
+ origin_dns:
+ file: /etc/dnsmasq.d/origin-dns.conf
+ lines:
+ - regex: ^listen-address
+ state: present
+ line: "listen-address={{ ansible_default_ipv4.address }}"
+ node_dns:
+ file: /etc/dnsmasq.d/node-dnsmasq.conf
+ lines:
+ - regex: "^server=/in-addr.arpa/127.0.0.1$"
+ line: server=/in-addr.arpa/127.0.0.1
+ - regex: "^server=/cluster.local/127.0.0.1$"
+ line: server=/cluster.local/127.0.0.1
+
+ tasks:
+ - include_vars: openshift_settings.yaml
+
+ - name: set the data for node_dns
+ lineinfile:
+ create: yes
+ insertafter: EOF
+ path: "{{ node_dns.file }}"
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line | default(omit) }}"
+ with_items: "{{ node_dns.lines }}"
+
+ - name: set the data for origin_dns
+ lineinfile:
+ create: yes
+ state: "{{ item.state | default('present') }}"
+ insertafter: "{{ item.after | default(omit) }}"
+ path: "{{ origin_dns.file }}"
+ regexp: "{{ item.regex }}"
+      line: "{{ item.line | default(omit) }}"
+ with_items: "{{ origin_dns.lines }}"
+
+ - when:
+ - openshift_group_type is defined
+ - openshift_group_type != ''
+ - openshift_group_type != 'master'
+ block:
+ - name: determine the openshift_service_type
+ stat:
+ path: /etc/sysconfig/atomic-openshift-node
+ register: service_type_results
+
+ - name: set openshift_service_type fact based on stat results
+ set_fact:
+ openshift_service_type: "{{ service_type_results.stat.exists | ternary('atomic-openshift', 'origin') }}"
+
+ - name: update the sysconfig to have necessary variables
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
+ line: "{{ item.line }}"
+ regexp: "{{ item.regexp }}"
+ with_items:
+ - line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}"
+ regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
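The ansible-playbook shebang lets a freshly provisioned host run this file directly against localhost on first boot (the bootstrap tasks below install it as /root/openshift_bootstrap/bootstrap.yml). The included openshift_settings.yaml is expected to supply at least the group type, e.g. (illustrative):

    openshift_group_type: compute

which steers the BOOTSTRAP_CONFIG_NAME line toward the matching node-config-compute configmap created on the masters.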
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 25a6fc721..b102c1b18 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -3,7 +3,11 @@
systemd:
name: openvswitch
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - not (ovs_service_status_changed | default(false) | bool)
+ - openshift_node_use_openshift_sdn | bool
+ - not openshift_node_bootstrap
register: l_openshift_node_stop_openvswitch_result
until: not l_openshift_node_stop_openvswitch_result | failed
retries: 3
@@ -11,10 +15,11 @@
notify:
- restart openvswitch pause
-
- name: restart openvswitch pause
pause: seconds=15
- when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - openshift.common.is_containerized | bool
- name: restart node
systemd:
diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml
new file mode 100644
index 000000000..38c2b794d
--- /dev/null
+++ b/roles/openshift_node/tasks/aws.yml
@@ -0,0 +1,21 @@
+---
+- name: Configure AWS Cloud Provider Settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^AWS_ACCESS_KEY_ID='
+ line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
+ - regex: '^AWS_SECRET_ACCESS_KEY='
+ line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
+ register: sys_env_update
+ no_log: True
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'aws'
+ - openshift_cloudprovider_aws_access_key is defined
+ - openshift_cloudprovider_aws_secret_key is defined
+ notify:
+ - restart node
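Nothing here invents credentials: the task only fires when the inventory already defines them, e.g. (placeholder values, ideally kept in a vault):

    openshift_cloudprovider_kind: aws
    openshift_cloudprovider_aws_access_key: "{{ vault_aws_access_key }}"
    openshift_cloudprovider_aws_secret_key: "{{ vault_aws_secret_key }}"

and no_log keeps the secret out of the play output even though the rendered sysconfig lines contain it.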
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index 6bd2df362..8c03f6c41 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -17,17 +17,29 @@
[Unit]
After=cloud-init.service
-- name: update the sysconfig to have KUBECONFIG
+- name: update the sysconfig to have necessary variables
lineinfile:
dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
- line: "KUBECONFIG=/root/csr_kubeconfig"
+ line: "{{ item.line | default(omit) }}"
+ regexp: "{{ item.regexp }}"
+ state: "{{ item.state | default('present') }}"
+ with_items:
+ # add the kubeconfig
+ - line: "KUBECONFIG=/etc/origin/node/csr_kubeconfig"
regexp: "^KUBECONFIG=.*"
+      # remove the CONFIG_FILE entry; it is managed by openshift_facts
+ - regexp: "^CONFIG_FILE=.*"
+ state: absent
-- name: update the ExecStart to have bootstrap
- lineinfile:
- dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service"
- line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
- regexp: "^ExecStart=.*"
+- name: include aws sysconfig credentials
+ include: aws.yml
+ static: yes
+
+#- name: update the ExecStart to have bootstrap
+# lineinfile:
+# dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service"
+# line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
+# regexp: "^ExecStart=.*"
- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services"
systemd:
@@ -42,6 +54,30 @@
path: /etc/origin/.config_managed
register: rpmgenerated_config
+- name: create directories for bootstrapping
+ file:
+ state: directory
+ dest: "{{ item }}"
+ with_items:
+ - /root/openshift_bootstrap
+ - /var/lib/origin/openshift.local.config
+ - /var/lib/origin/openshift.local.config/node
+ - "/etc/docker/certs.d/docker-registry.default.svc:5000"
+
+- name: lay down the bootstrap.yml file for on-boot configuration
+ copy:
+ src: bootstrap.yml
+ dest: /root/openshift_bootstrap/bootstrap.yml
+
+- name: symlink master ca for docker-registry
+ file:
+ src: "{{ item }}"
+ dest: "/etc/docker/certs.d/docker-registry.default.svc:5000/{{ item | basename }}"
+ state: link
+ force: yes
+ with_items:
+ - /var/lib/origin/openshift.local.config/node/node-client-ca.crt
+
- when: rpmgenerated_config.stat.exists
block:
- name: Remove RPM generated config files if present
@@ -50,6 +86,7 @@
state: absent
with_items:
- master
+ - .config_managed
# with_fileglob doesn't work correctly due to a few issues.
# Could change this to fileglob when it gets fixed.
@@ -62,5 +99,7 @@
file:
path: "{{ item.path }}"
state: absent
- when: "'resolv.conf' not in item.path or 'node-dnsmasq.conf' not in item.path"
+ when:
+ - "'resolv.conf' not in item.path"
+ - "'node-dnsmasq.conf' not in item.path"
with_items: "{{ find_results.files }}"
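The condition rewrite here is a behavior fix, not just style: the old 'A or B' string was true for every file (no path contains both names at once), so nothing was ever skipped; as two AND-ed list items, a file is removed only when its path contains neither resolv.conf nor node-dnsmasq.conf, preserving exactly those two dnsmasq-related files.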
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index e5fcaf9af..c08f43118 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -46,26 +46,16 @@
notify:
- restart node
-- name: Configure AWS Cloud Provider Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^AWS_ACCESS_KEY_ID='
- line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
- - regex: '^AWS_SECRET_ACCESS_KEY='
- line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
- no_log: True
- when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
- notify:
- - restart node
+- name: include aws provider credentials
+ include: aws.yml
+ static: yes
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
-- when: openshift.common.is_containerized | bool
+- when:
+ - openshift.common.is_containerized | bool
+ - not openshift_node_bootstrap
block:
- name: Wait for master API to become available before proceeding
# Using curl here since the uri module requires python-httplib2 and
@@ -90,26 +80,28 @@
enabled: yes
state: started
-- name: Start and enable node
- systemd:
- name: "{{ openshift.common.service_type }}-node"
- enabled: yes
- state: started
- daemon_reload: yes
- register: node_start_result
- until: not node_start_result | failed
- retries: 1
- delay: 30
- ignore_errors: true
+- when: not openshift_node_bootstrap
+ block:
+ - name: Start and enable node
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: node_start_result
+ until: not node_start_result | failed
+ retries: 1
+ delay: 30
+ ignore_errors: true
-- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
- when: node_start_result | failed
+ - name: Dump logs from node service if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ when: node_start_result | failed
-- name: Abort if node failed to start
- fail:
- msg: Node failed to start please inspect the logs and try again
- when: node_start_result | failed
+ - name: Abort if node failed to start
+ fail:
+      msg: Node failed to start, please inspect the logs and try again
+ when: node_start_result | failed
-- set_fact:
- node_service_status_changed: "{{ node_start_result | changed }}"
+ - set_fact:
+ node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 1539d6e3b..6b7e40491 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -3,12 +3,12 @@
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- name: Install sdn-ovs package
package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- openshift_node_use_openshift_sdn | bool
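The substantive fix is the added default(''): the sdn-ovs task previously dereferenced openshift_pkg_version unconditionally and failed with an undefined-variable error on hosts that never set it, while the node task already guarded against that; now both degrade gracefully to an unversioned package name when no version is pinned.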
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 59b8bb76e..eae9ca7bc 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -66,15 +66,10 @@
sysctl_file: "/etc/sysctl.d/99-openshift.conf"
reload: yes
-- name: include bootstrap node config
- include: bootstrap.yml
- when: openshift_node_bootstrap
-
- include: registry_auth.yml
- name: include standard node config
include: config.yml
- when: not openshift_node_bootstrap
#### Storage class plugins here ####
- name: NFS storage plugin configuration
@@ -98,3 +93,7 @@
- include: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift_node_use_openshift_sdn | default(true) | bool
+
+- name: include bootstrap node config
+ include: bootstrap.yml
+ when: openshift_node_bootstrap
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index 0856737f6..7602d8ee6 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -12,17 +12,17 @@ After=dnsmasq.service
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
Environment=GOTRACEBACK=crash
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/openshift start node {% if openshift_node_bootstrap %} --kubeconfig=${KUBECONFIG} --bootstrap-config-name=${BOOTSTRAP_CONFIG_NAME}{% endif %} --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=65536
LimitCORE=infinity
WorkingDirectory=/var/lib/origin/
-SyslogIdentifier={{ openshift.common.service_type }}-node
+SyslogIdentifier={{ openshift_service_type }}-node
Restart=always
RestartSec=5s
TimeoutStartSec=300
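With openshift_node_bootstrap enabled, the ExecStart line renders roughly as (extra whitespace aside):

    ExecStart=/usr/bin/openshift start node --kubeconfig=${KUBECONFIG} --bootstrap-config-name=${BOOTSTRAP_CONFIG_NAME} --config=${CONFIG_FILE} $OPTIONS

where KUBECONFIG and BOOTSTRAP_CONFIG_NAME come from the sysconfig entries written by the bootstrap tasks above.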
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 08e1c7f4f..718d35dca 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -44,7 +44,7 @@ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
@@ -67,9 +67,11 @@ servingInfo:
{% endfor %}
{% endif %}
volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
+{% if not (openshift_node_use_kuryr | default(False)) | bool %}
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
+{% endif %}
volumeConfig:
localQuota:
perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 5aa8aecec..c08bec4cb 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -10,50 +10,30 @@ openshift_prometheus_node_selector: {"region":"infra"}
# images
openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
-openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
+openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:v0.9.1"
openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
-# All the required exports
-openshift_prometheus_pv_exports:
- - prometheus
- - prometheus-alertmanager
- - prometheus-alertbuffer
-# PV template files and their created object names
-openshift_prometheus_pv_data:
- - pv_name: prometheus
- pv_template: prom-pv-server.yml
- pv_label: Prometheus Server PV
- - pv_name: prometheus-alertmanager
- pv_template: prom-pv-alertmanager.yml
- pv_label: Prometheus Alertmanager PV
- - pv_name: prometheus-alertbuffer
- pv_template: prom-pv-alertbuffer.yml
- pv_label: Prometheus Alert Buffer PV
-
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
-
# storage
openshift_prometheus_storage_type: pvc
openshift_prometheus_pvc_name: prometheus
-openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_pvc_pv_selector: {}
+openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
openshift_prometheus_alertmanager_storage_type: pvc
openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
-openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertmanager_pvc_pv_selector: {}
+openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
openshift_prometheus_alertbuffer_storage_type: pvc
openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
-openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"
# container resources
openshift_prometheus_cpu_limit: null
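The net effect is that generic openshift_prometheus_*storage* inventory variables now feed the PVC definitions instead of a hardwired NFS layout, so a deployment can size and target its volumes without touching role internals, e.g. (hypothetical group_vars snippet):

    openshift_prometheus_storage_volume_size: 50Gi
    openshift_prometheus_storage_labels:
      storage: prometheus

Also note the size default moved from 10G to 10Gi, the binary-suffix quantity form Kubernetes storage requests conventionally use.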
diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports
deleted file mode 100644
index 3ccedb1fd..000000000
--- a/roles/openshift_prometheus/files/openshift_prometheus.exports
+++ /dev/null
@@ -1,3 +0,0 @@
-/exports/prometheus *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml
deleted file mode 100644
index 4e79da05f..000000000
--- a/roles/openshift_prometheus/tasks/create_pvs.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Check for existance and then conditionally:
-# - evaluate templates
-# - PVs
-#
-# These tasks idempotently create required Prometheus PV objects. Do not
-# call this file directly. This file is intended to be ran as an
-# include that has a 'with_items' attached to it. Hence the use below
-# of variables like "{{ item.pv_label }}"
-
-- name: "Check if the {{ item.pv_label }} template has been created already"
- oc_obj:
- namespace: "{{ openshift_prometheus_namespace }}"
- state: list
- kind: pv
- name: "{{ item.pv_name }}"
- register: prom_pv_check
-
-# Skip all of this if the PV already exists
-- block:
- - name: "Ensure the {{ item.pv_label }} template is evaluated"
- template:
- src: "{{ item.pv_template }}.j2"
- dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
-
- - name: "Ensure {{ item.pv_label }} is created"
- oc_obj:
- namespace: "{{ openshift_prometheus_namespace }}"
- kind: pv
- name: "{{ item.pv_name }}"
- state: present
- delete_after: True
- files:
- - "{{ tempdir }}/templates/{{ item.pv_template }}"
- when:
- - not prom_pv_check.results.results.0
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index a9bce2fb1..cb75eedca 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -54,15 +54,6 @@
resource_name: cluster-reader
user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
-
-######################################################################
-# NFS
-# In the case that we are not running on a cloud provider, volumes must be statically provisioned
-
-- include: nfs.yaml
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-
# create prometheus and alerts services
# TODO join into 1 task with loop
- name: Create prometheus service
diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml
deleted file mode 100644
index 0b45f2cee..000000000
--- a/roles/openshift_prometheus/tasks/nfs.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# Tasks to statically provision NFS volumes
-# Include if not using dynamic volume provisioning
-- name: Ensure the /exports/ directory exists
- file:
- path: /exports/
- state: directory
- mode: 0755
- owner: root
- group: root
-
-- name: Ensure the prom-pv0X export directories exist
- file:
- path: "/exports/{{ item }}"
- state: directory
- mode: 0777
- owner: nfsnobody
- group: nfsnobody
- with_items: "{{ openshift_prometheus_pv_exports }}"
-
-- name: Ensure the NFS exports for Prometheus PVs exist
- copy:
- src: openshift_prometheus.exports
- dest: /etc/exports.d/openshift_prometheus.exports
- register: nfs_exports_updated
-
-- name: Ensure the NFS export table is refreshed if exports were added
- command: exportfs -ar
- when:
- - nfs_exports_updated.changed
-
-
-######################################################################
-# Create the required Prometheus PVs. Check out these online docs if you
-# need a refresher on includes looping with items:
-# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
-# * http://stackoverflow.com/a/35128533
-#
-# TODO: Handle the case where a PV template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- include: create_pvs.yaml
- with_items: "{{ openshift_prometheus_pv_data }}"
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
deleted file mode 100644
index 55a5e19c3..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus-alertbuffer
- labels:
- storage: prometheus-alertbuffer
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus-alertbuffer
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
deleted file mode 100644
index 4ee518735..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus-alertmanager
- labels:
- storage: prometheus-alertmanager
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus-alertmanager
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
deleted file mode 100644
index 933bf0f60..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus
- labels:
- storage: prometheus
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus_deployment.j2
index 98c117f19..66eab6df4 100644
--- a/roles/openshift_prometheus/templates/prometheus_deployment.j2
+++ b/roles/openshift_prometheus/templates/prometheus_deployment.j2
@@ -38,7 +38,7 @@ spec:
cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
{% endif %}
limits:
-{% if openshift_prometheus_memory_requests_limit_proxy is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
{% endif %}
{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
index 71e21a269..56b2d1463 100644
--- a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -4,22 +4,23 @@ metadata:
name: service-catalog
objects:
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: servicecatalog-serviceclass-viewer
rules:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - serviceclasses
+ - clusterserviceclasses
+ - clusterserviceplans
verbs:
- list
- watch
- get
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: servicecatalog-serviceclass-viewer-binding
roleRef:
@@ -37,8 +38,8 @@ objects:
metadata:
name: service-catalog-apiserver
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: sar-creator
rules:
@@ -49,17 +50,19 @@ objects:
verbs:
- create
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-sar-creator-binding
roleRef:
name: sar-creator
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: namespace-viewer
rules:
@@ -72,26 +75,30 @@ objects:
- watch
- get
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-namespace-viewer-binding
roleRef:
name: namespace-viewer
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-controller-namespace-viewer-binding
roleRef:
name: namespace-viewer
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-controller
+ namespace: kube-service-catalog
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: service-catalog-controller
rules:
@@ -102,6 +109,7 @@ objects:
verbs:
- create
- update
+ - patch
- delete
- get
- list
@@ -109,19 +117,22 @@ objects:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - brokers/status
- - instances/status
- - bindings/status
+ - clusterservicebrokers/status
+ - serviceinstances/status
+ - servicebindings/status
+ - servicebindings/finalizers
+ - serviceinstances/reference
verbs:
- update
- apiGroups:
- servicecatalog.k8s.io
resources:
- - brokers
- - instances
- - bindings
+ - clusterservicebrokers
+ - serviceinstances
+ - servicebindings
verbs:
- list
+ - get
- watch
- apiGroups:
- ""
@@ -133,7 +144,8 @@ objects:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - serviceclasses
+ - clusterserviceclasses
+ - clusterserviceplans
verbs:
- create
- delete
@@ -154,17 +166,19 @@ objects:
- list
- watch
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-controller-binding
roleRef:
name: service-catalog-controller
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
-
-- kind: Role
- apiVersion: v1
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-controller
+ namespace: kube-service-catalog
+
+- apiVersion: authorization.openshift.io/v1
+ kind: Role
metadata:
name: endpoint-accessor
rules:
@@ -179,21 +193,25 @@ objects:
- create
- update
-- kind: RoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: RoleBinding
metadata:
- name: endpoint-accessor-binding
+    name: endpoint-accessor-binding
roleRef:
name: endpoint-accessor
namespace: kube-service-catalog
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
+ subjects:
+ - kind: ServiceAccount
+ namespace: kube-service-catalog
+ name: service-catalog-controller
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: system:auth-delegator-binding
roleRef:
name: system:auth-delegator
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
index f6ee0955d..e1af51ce6 100644
--- a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
@@ -4,8 +4,8 @@ metadata:
name: kube-system-service-catalog
objects:
-- kind: Role
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: Role
metadata:
name: extension-apiserver-authentication-reader
namespace: ${KUBE_SYSTEM_NAMESPACE}
@@ -19,16 +19,18 @@ objects:
verbs:
- get
-- kind: RoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: RoleBinding
metadata:
name: extension-apiserver-authentication-reader-binding
namespace: ${KUBE_SYSTEM_NAMESPACE}
roleRef:
name: extension-apiserver-authentication-reader
- namespace: kube-system
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
parameters:
- description: Do not change this value.
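Across both files the migration is the same: the deprecated userNames list, with entries like system:serviceaccount:kube-service-catalog:service-catalog-apiserver, becomes an explicit subjects entry of kind ServiceAccount with the name and namespace split out, the equivalent, non-deprecated spelling of the same binding.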
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index cc897b032..416bdac70 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -36,19 +36,28 @@
- name: tls.key
path: "{{ generated_certs_dir }}/apiserver.key"
+- name: Create service-catalog-ssl secret
+ oc_secret:
+ state: present
+ name: service-catalog-ssl
+ namespace: kube-service-catalog
+ files:
+ - name: tls.crt
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+
- slurp:
src: "{{ generated_certs_dir }}/ca.crt"
register: apiserver_ca
- shell: >
- oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
register: get_apiservices
changed_when: no
- name: Create api service
oc_obj:
state: present
- name: v1alpha1.servicecatalog.k8s.io
+ name: v1beta1.servicecatalog.k8s.io
kind: apiservices.apiregistration.k8s.io
namespace: "kube-service-catalog"
content:
@@ -57,10 +66,10 @@
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
- name: v1alpha1.servicecatalog.k8s.io
+ name: v1beta1.servicecatalog.k8s.io
spec:
group: servicecatalog.k8s.io
- version: v1alpha1
+ version: v1beta1
service:
namespace: "kube-service-catalog"
name: apiserver
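Once applied, the registration can be sanity-checked with a task along these lines (illustrative, not part of the role):

    - name: confirm the v1beta1 service catalog APIService exists
      command: >
        oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog
      changed_when: false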
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index e202ae173..1e94c8c5d 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -90,14 +90,14 @@
vars:
original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update edit role for service catalog and pod preset access
command: >
oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: admin
@@ -113,14 +113,14 @@
vars:
original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update admin role for service catalog and pod preset access
command: >
oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_adm_policy_user:
namespace: kube-service-catalog
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index 2fb1ec440..96ae61507 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,7 +1,7 @@
---
- name: Remove Service Catalog APIServer
command: >
- oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+ oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
- name: Remove Policy Binding
command: >
@@ -13,7 +13,7 @@
# state: absent
# namespace: "kube-service-catalog"
# kind: apiservices.apiregistration.k8s.io
-# name: v1alpha1.servicecatalog.k8s.io
+# name: v1beta1.servicecatalog.k8s.io
- name: Remove Service Catalog API Server route
oc_obj:
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index c09834fd4..5d5352c1c 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -41,7 +41,9 @@ spec:
- --cors-allowed-origins
- {{ cors_allowed_origin }}
- --admission-control
- - "KubernetesNamespaceLifecycle"
+ - KubernetesNamespaceLifecycle,DefaultServicePlan,ServiceBindingsLifecycle,ServicePlanChangeValidator,BrokerAuthSarCheck
+ - --feature-gates
+ - OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/apiserver"]
imagePullPolicy: Always
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 1bbc0fa2c..2272cbb44 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -31,7 +31,12 @@ spec:
args:
- -v
- "5"
- - "--leader-election-namespace=$(K8S_NAMESPACE)"
+ - --leader-election-namespace
+ - kube-service-catalog
+ - --broker-relist-interval
+ - "5m"
+ - --feature-gates
+ - OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/controller-manager"]
imagePullPolicy: Always
@@ -41,7 +46,19 @@ spec:
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - mountPath: /var/run/kubernetes-service-catalog
+ name: service-catalog-ssl
+ readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
securityContext: {}
terminationGracePeriodSeconds: 30
+ volumes:
+ - name: service-catalog-ssl
+ secret:
+ defaultMode: 420
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ secretName: apiserver-ssl
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 3047fbaf9..c4e023c1e 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -35,6 +35,9 @@
- "{{ openshift.logging }}"
- "{{ openshift.loggingops }}"
- "{{ openshift.hosted.etcd }}"
+ - "{{ openshift.prometheus }}"
+ - "{{ openshift.prometheus.alertmanager }}"
+ - "{{ openshift.prometheus.alertbuffer }}"
- name: Configure exports
template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 0141e0d25..c2a741035 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -3,3 +3,6 @@
{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
+{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
+{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
+{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 54008bbf1..6a532a206 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -6,7 +6,7 @@
- "{{ openshift_deployment_type | default(deployment_type) }}.yml"
- "default_images.yml"
-- name: set ansible_service_broker facts
+- name: set template_service_broker facts
set_fact:
template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}"
template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}"
@@ -76,7 +76,7 @@
when: openshift_master_config_dir is undefined
- slurp:
- src: "{{ openshift_master_config_dir }}/ca.crt"
+ src: "{{ openshift_master_config_dir }}/service-signer.crt"
register: __ca_bundle
# Register with broker