-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  docs/proposals/role_decomposition.md | 6
-rw-r--r--  files/origin-components/template-service-broker-registration.yaml | 4
-rw-r--r--  inventory/byo/hosts.example | 6
-rw-r--r--  openshift-ansible.spec | 107
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 2
-rw-r--r--  playbooks/common/openshift-etcd/embedded2external.yml | 2
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 6
-rw-r--r--  playbooks/common/openshift-node/additional_config.yml | 14
-rw-r--r--  roles/ansible_service_broker/defaults/main.yml | 5
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 50
-rw-r--r--  roles/ansible_service_broker/tasks/remove.yml | 12
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 5
-rw-r--r--  roles/docker/tasks/main.yml | 9
-rw-r--r--  roles/docker/tasks/package_docker.yml | 20
-rw-r--r--  roles/docker/tasks/registry_auth.yml | 12
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 34
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 36
-rw-r--r--  roles/etcd/tasks/system_container.yml | 5
-rw-r--r--  roles/kuryr/README.md | 38
-rw-r--r--  roles/kuryr/defaults/main.yaml | 72
-rw-r--r--  roles/kuryr/meta/main.yml | 17
-rw-r--r--  roles/kuryr/tasks/master.yaml | 52
-rw-r--r--  roles/kuryr/tasks/node.yaml | 48
-rw-r--r--  roles/kuryr/tasks/serviceaccount.yaml | 31
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 53
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 343
-rw-r--r--  roles/kuryr/templates/controller-deployment.yaml.j2 | 40
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_atomic/tasks/proxy.yml | 32
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 13
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 9
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 7
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 5
-rw-r--r--  roles/openshift_logging/README.md | 93
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 36
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 19
-rw-r--r--  roles/openshift_logging/filter_plugins/test | 34
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 9
-rw-r--r--  roles/openshift_logging/templates/jks_pod.j2 | 2
-rw-r--r--  roles/openshift_logging_curator/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 17
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 11
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 11
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/README.md | 6
-rw-r--r--  roles/openshift_logging_eventrouter/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_logging_eventrouter/files/eventrouter-template.yaml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 8
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 25
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 42
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 13
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 21
-rw-r--r--  roles/openshift_master/defaults/main.yml | 3
-rw-r--r--  roles/openshift_master/tasks/journald.yml | 22
-rw-r--r--  roles/openshift_master/tasks/main.yml | 25
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 5
-rw-r--r--  roles/openshift_master/tasks/upgrade_facts.yml | 4
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 1
-rw-r--r--  roles/openshift_metrics/README.md | 75
-rw-r--r--  roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/heapster.j2 | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 5
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 5
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 16
-rw-r--r--  roles/openshift_node_dnsmasq/README.md | 27
-rw-r--r--  roles/openshift_node_dnsmasq/defaults/main.yml | 1
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/network-manager.yml | 1
-rw-r--r--  roles/openshift_prometheus/README.md | 35
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 24
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 15
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.j2 (renamed from roles/openshift_prometheus/templates/prometheus_deployment.j2) | 23
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml | 12
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 13
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/unsupported.yml | 8
-rw-r--r--  roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 112
-rw-r--r--  roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml | 18
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 27
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 18
-rw-r--r--  roles/openshift_service_catalog/tasks/remove.yml | 28
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 4
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 19
-rw-r--r--  roles/openshift_service_catalog/templates/sc_role_patching.j2 | 4
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 4
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 4
112 files changed, 1818 insertions, 379 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 6200c06ab..b340654f4 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.162.0 ./
+3.7.0-0.178.0 ./
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index b6c1d8c5b..6434e24e7 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -158,13 +158,13 @@ providing the location of the generated certificates to the individual roles.
openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"
openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
- openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
+ openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}"
openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
- openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
+ openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}"
openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}"
@@ -193,8 +193,8 @@ providing the location of the generated certificates to the individual roles.
openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
- openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
+ openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}"
openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
when:
- openshift_logging_use_ops | bool
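The ops deployment above now forwards CPU "requests" where it previously forwarded CPU "limits". As a point of reference, here is a hedged sketch of how the two knobs land in a container's resources block (the exact template wiring lives in the individual logging roles; values are illustrative):

    # Illustrative only: a request guarantees scheduling capacity, while a
    # limit caps usage and can trigger CFS throttling under load.
    resources:
      requests:
        cpu: "{{ openshift_logging_kibana_cpu_request }}"
        memory: "{{ openshift_logging_kibana_memory_limit }}"
      limits:
        memory: "{{ openshift_logging_kibana_memory_limit }}"
        # no cpu limit set, so the container may use idle CPU freely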
diff --git a/files/origin-components/template-service-broker-registration.yaml b/files/origin-components/template-service-broker-registration.yaml
index 2086978f0..95fb72924 100644
--- a/files/origin-components/template-service-broker-registration.yaml
+++ b/files/origin-components/template-service-broker-registration.yaml
@@ -9,8 +9,8 @@ parameters:
required: true
objects:
# register the tsb with the service catalog
-- apiVersion: servicecatalog.k8s.io/v1alpha1
- kind: ServiceBroker
+- apiVersion: servicecatalog.k8s.io/v1beta1
+ kind: ClusterServiceBroker
metadata:
name: template-service-broker
spec:
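In the service catalog's v1beta1 API the broker registration is cluster-scoped, hence the kind rename. Assembled from the hunk above, the registered object now looks roughly like this (spec fields stay parameterized in the template and are elided here):

    apiVersion: servicecatalog.k8s.io/v1beta1
    kind: ClusterServiceBroker   # cluster-scoped; no namespace in metadata
    metadata:
      name: template-service-broker
    spec:
      # url and caBundle come from template parameters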
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 499a9d8e7..7c4a7885d 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -632,6 +632,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_storage_volume_name=prometheus
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+#openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
@@ -640,6 +641,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+#openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
@@ -648,6 +650,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#openshift_prometheus_alertbuffer_storage_type='pvc'
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -660,6 +663,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_storage_volume_name=prometheus
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+#openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
@@ -668,6 +672,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+#openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
@@ -676,6 +681,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#openshift_prometheus_alertbuffer_storage_type='pvc'
#
# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
# which are destroyed when pods are deleted
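The new openshift_prometheus_*_storage_type switches let each component choose between a PVC and the emptydir fallback described in Option C. A hedged sketch of the gating this implies inside the prometheus role (the task shape and variable values here are assumptions, not the role's literal code):

    # Sketch: only provision the claim when storage_type is 'pvc';
    # 'emptydir' skips PVC creation entirely.
    - name: create prometheus pvc
      oc_pvc:
        namespace: "{{ openshift_prometheus_namespace }}"
        name: prometheus
        access_modes: ['ReadWriteOnce']
        volume_capacity: 10Gi
      when: openshift_prometheus_storage_type == 'pvc'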
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 595b7f19b..5ca9ac3a9 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.162.0%{?dist}
+Release: 0.178.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -280,6 +280,111 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Oct 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.178.0
+- Split prometheus image defaults to prefix and version (zgalor@redhat.com)
+- Remove extraneous spaces that yamllint dislikes (staebler@redhat.com)
+- Fix edit and admin role patching for service catalog (staebler@redhat.com)
+- strip dash when comparing version with Python3 (jchaloup@redhat.com)
+- Bug 1452939 - change Logging & Metrics imagePullPolicy (jwozniak@redhat.com)
+- Remove role bindings during service catalog un-install (staebler@redhat.com)
+- Fix a few small issues in service catalog uninstall (staebler@redhat.com)
+- Remove incorrect validation for OpenIDIdentityProvider (mgugino@redhat.com)
+- Enable oreg_auth credential replace during upgrades (mgugino@redhat.com)
+- Handle bootstrap behavior in GCP template (ccoleman@redhat.com)
+- Ensure upgrades apply latest journald settings (mgugino@redhat.com)
+
+* Tue Oct 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.177.0
+- Check if the master service is non-ha or not (jchaloup@redhat.com)
+- Correct host group for controller restart (rteague@redhat.com)
+- Set the proper external etcd ip address when migrating embedded etcd
+ (jchaloup@redhat.com)
+- Switch to stateful set in prometheus (zgalor@redhat.com)
+- cli: use the correct name for the master system container
+ (gscrivan@redhat.com)
+- cli: do not pull again the image when using Docker (gscrivan@redhat.com)
+- version_gte seems unreliable on containerized installs (sdodson@redhat.com)
+- Retry reconcile in case of error and give up eventually (simo@redhat.com)
+- Updating ocp es proxy image to use openshift_logging_proxy_image_prefix if
+ specified (ewolinet@redhat.com)
+- Generate all internal hostnames of no_proxy (ghuang@redhat.com)
+- Add nfs variables documentation to README file (zgalor@redhat.com)
+- Avoid undefined variable in master sysconfig template (hansmi@vshn.ch)
+- Ensure proper variable templating for skopeo auth credentials
+ (mgugino@redhat.com)
+
+* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.176.0
+- Update defaults (fabian@fabianism.us)
+- Use service-ca.crt instead of master ca.crt (fabian@fabianism.us)
+- use master cert (fabian@fabianism.us)
+- Bug 1496426 - add asb-client secret to openshift-ansible-service-broker
+ namespace (fabian@fabianism.us)
+- docker: Move enterprise registry from pkg to main (smilner@redhat.com)
+- systemcontainers: Verify atomic.conf proxy is always configured
+ (smilner@redhat.com)
+- Add variable to control whether NetworkManager hook is installed
+ (hansmi@vshn.ch)
+
+* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.175.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.174.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.173.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.172.0
+-
+
+* Sat Oct 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.171.0
+- Use "requests" for CPU resources instead of limits
+ (peter.portante@redhat.com)
+- [bz1501271] Attempt to use ami ssh user and default to ansible_ssh_user.
+ (kwoodson@redhat.com)
+- Fix undefined variable for master upgrades (mgugino@redhat.com)
+- Adding pre check to verify clusterid is set along with cloudprovider when
+ performing upgrade. (kwoodson@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.170.0
+- Check for container runtime prior to restarting when updating system CA
+ trust. (abutcher@redhat.com)
+- bug 1489498. preserve replica and shard settings (jcantril@redhat.com)
+- Set servingInfo.clientCA to ca.crt during upgrade. (abutcher@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.169.0
+- Initial Kuryr support (mdulko@redhat.com)
+- Indentation errors (dymurray@redhat.com)
+- Bug 1503233 - Add liveness and readiness probe checks to ASB deploymentconfig
+ (dymurray@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.165.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.164.0
+- Change to service-signer.crt for template_service_broker CA_BUNDLE
+ (staebler@redhat.com)
+- Use service-signer.crt for ca_bundle passed to clusterservicebroker
+ (staebler@redhat.com)
+- Rename ServiceBroker to ClusterServiceBroker for ansible_service_broker task.
+ (staebler@redhat.com)
+- Add apiserver.crt to service-catalog controller-manager deployment.
+ (staebler@redhat.com)
+- Remove redundant faulty role binding from
+  kubeservicecatalog_roles_bindings.yml (staebler@redhat.com)
+- Update service catalog playbook for service-catalog rc1 (staebler@redhat.com)
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.163.0
+- set use_manageiq as default (efreiber@redhat.com)
+
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.162.0
- Wait longer for stable GCP instances (ccoleman@redhat.com)
- Remove unneeded master config updates during upgrades (mgugino@redhat.com)
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index ee281929a..5b4a6a1e8 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -26,7 +26,7 @@
tasks:
- name: set the user to perform installation
set_fact:
- ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}"
+ ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}"
openshift_node_bootstrap: True
# This is the part that installs all of the software and configs for the instance
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index be2f8b5f4..0f563adb7 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -145,7 +145,19 @@
https_proxy: "{{ openshift_https_proxy | default(None) }}"
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- name: initialize_facts set_fact repoquery command
set_fact:
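The new task folds every configured master, node, and etcd hostname into a single comma-separated no_proxy value. A minimal sketch of the filter chain, assuming three hosts with openshift.common.hostname set:

    # hostvars:
    #   master1 -> openshift.common.hostname == "master1.example.com"
    #   node1   -> openshift.common.hostname == "node1.example.com"
    #   etcd1   -> openshift.common.hostname == "etcd1.example.com"
    #
    # oo_select_keys picks those hosts out of hostvars, oo_collect pulls the
    # hostname fact from each, and join produces:
    #   "master1.example.com,node1.example.com,etcd1.example.com"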
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 45022cd61..6a5bc24f7 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,16 +9,29 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
+ - when: openshift.common.is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
+      # Handle the case of a non-HA to HA upgrade.
+ - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 142ce5f3d..13fa37b09 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -4,6 +4,12 @@
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
+- name: Update oreg_auth docker login credentials if necessary
+ include_role:
+ name: docker
+ tasks_from: registry_auth.yml
+ when: oreg_auth_user is defined
+
- name: Verify containers are available for upgrade
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
@@ -37,7 +43,7 @@
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
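openshift_pkg_version is stored with a leading dash because it is appended verbatim to a package name (e.g. origin-3.7.0), and that dash broke version_compare under Python 3. Splitting on the dash recovers the bare version; a quick illustration under that assumption:

    # openshift_pkg_version: '-3.7.0'
    # '-3.7.0'.split('-')     -> ['', '3.7.0']
    # '-3.7.0'.split('-')[1]  -> '3.7.0'
    # '3.7.0' | version_compare('3.7', '<')  -> False, so no failure is raised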
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c37a5f9ab..a5e2f7940 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -90,6 +90,9 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
+ - name: Update journald config
+ include: ../../../../roles/openshift_master/tasks/journald.yml
+
- name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
@@ -199,7 +202,7 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -214,7 +217,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -229,9 +232,11 @@
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
+ when:
+ - openshift_version | version_compare('3.7','<')
+ - openshift_version | version_compare('3.4','>=')
- - when: (openshift.common.version_gte_3_6 | bool) and (not openshift.common.version_gte_3_7 | bool)
+ - when: openshift_upgrade_target | version_compare('3.7','<')
block:
- name: Retrieve shared-resource-viewer
oc_obj:
@@ -250,7 +255,6 @@
- "'annotations' in objout['results']['results'][0]['metadata']"
- "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
- "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
-
- copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
@@ -268,6 +272,12 @@
- "/tmp/{{ __master_shared_resource_viewer_file }}"
delete_after: true
when: __shared_resource_viewer_protected is not defined
+ register: result
+ retries: 3
+ delay: 5
+ until: result.rc == 0
+ ignore_errors: true
+
- name: Reconcile Security Context Constraints
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
index 52458e03c..db0c8f886 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -8,3 +8,8 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
index c26e8f744..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -13,3 +13,8 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
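modify_yaml patches the key in place, so after this task runs the relevant fragment of master-config.yaml reads:

    servingInfo:
      clientCA: ca.crt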
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index bf3b94682..81f6dc8a4 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -125,7 +125,7 @@
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
- hosts: oo_etcd_to_config
+ hosts: oo_masters_to_config
gather_facts: no
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index f76fc68d1..8e4f99c91 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -15,7 +15,7 @@
- name: Confirm OpenShift authorization objects are in sync
command: >
{{ openshift.common.client_binary }} adm migrate authorization
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
changed_when: false
register: l_oc_result
until: l_oc_result.rc == 0
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
index 9264f3c32..b16b78c4f 100644
--- a/playbooks/common/openshift-etcd/embedded2external.yml
+++ b/playbooks/common/openshift-etcd/embedded2external.yml
@@ -158,7 +158,7 @@
tasks_from: configure_external_etcd
vars:
etcd_peer_url_scheme: "https"
- etcd_ip: "{{ openshift.common.ip }}"
+ etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
etcd_peer_port: 2379
# 9. start the master
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index 1b3eb268a..e1472ce38 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -25,7 +25,7 @@
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift_use_manageiq | default(false) | bool
+ when: openshift_use_manageiq | default(true) | bool
- role: cockpit
when:
- openshift.common.is_atomic
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 6e57f282e..b359919ba 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -206,6 +206,12 @@
when: openshift_use_nuage | default(false) | bool
- role: calico_master
when: openshift_use_calico | default(false) | bool
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: master
+ when: openshift_use_kuryr | default(false) | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
index fe51ef833..ac757397b 100644
--- a/playbooks/common/openshift-node/additional_config.yml
+++ b/playbooks/common/openshift-node/additional_config.yml
@@ -19,10 +19,14 @@
- group_by:
key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
changed_when: False
+ # Create group for kuryr nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
+ changed_when: False
- include: etcd_client_config.yml
vars:
- openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv"
+ openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
- name: Additional node config
hosts: oo_nodes_use_flannel
@@ -50,3 +54,11 @@
- role: contiv
contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
+
+- name: Configure Kuryr node
+ hosts: oo_nodes_use_kuryr
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: node
+ when: openshift_use_kuryr | default(false) | bool
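The group_by ternary used above is the same trick the flannel, calico, and contiv plays rely on: every node lands in either the real group or a throwaway one, so the follow-up play can target oo_nodes_use_kuryr unconditionally. Sketch of the evaluation:

    # openshift_use_kuryr=True  -> key renders "oo_nodes_use_kuryr"
    # variable unset            -> default(False) -> "oo_nodes_use_nothing"
    # The play "Configure Kuryr node" then simply sets:
    #   hosts: oo_nodes_use_kuryr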
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index fa982d533..dc05b03b5 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -13,7 +13,4 @@ ansible_service_broker_launch_apb_on_bind: false
ansible_service_broker_image_pull_policy: IfNotPresent
ansible_service_broker_sandbox_role: edit
-ansible_service_broker_auto_escalate: true
-ansible_service_broker_registry_tag: latest
-ansible_service_broker_registry_whitelist:
- - '.*-apb$'
+ansible_service_broker_auto_escalate: false
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 0f4b71124..66c3d9cc4 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -22,19 +22,14 @@
ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
-
- ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+ ansible_service_broker_registry_tag: "{{ ansible_service_broker_registry_tag | default(__ansible_service_broker_registry_tag) }}"
+ ansible_service_broker_registry_whitelist: "{{ ansible_service_broker_registry_whitelist | default(__ansible_service_broker_registry_whitelist) }}"
- name: set ansible-service-broker image facts using set prefix and tag
set_fact:
ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
-- slurp:
- src: "{{ ansible_service_broker_certs_dir }}/ca.crt"
- register: catalog_ca
-
-
- include: validate_facts.yml
@@ -79,13 +74,12 @@
state: present
name: asb-access
rules:
- - nonResourceURLs: ["/ansible-service-broker", "ansible-service-broker/*"]
+ - nonResourceURLs: ["/ansible-service-broker", "/ansible-service-broker/*"]
verbs: ["get", "post", "put", "patch", "delete"]
- name: Bind admin cluster-role to asb serviceaccount
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: admin
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
@@ -93,7 +87,6 @@
- name: Bind auth cluster role to asb service account
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: asb-auth
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
@@ -101,7 +94,6 @@
- name: Bind asb-access role to asb-client service account
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: asb-access
user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
@@ -109,6 +101,7 @@
- name: create asb-client token secret
oc_obj:
name: asb-client
+ namespace: openshift-ansible-service-broker
state: present
kind: Secret
content:
@@ -118,10 +111,20 @@
kind: Secret
metadata:
name: asb-client
+ namespace: openshift-ansible-service-broker
annotations:
kubernetes.io/service-account.name: asb-client
type: kubernetes.io/service-account-token
+- oc_secret:
+ state: list
+ namespace: openshift-ansible-service-broker
+ name: asb-client
+ register: asb_client_secret
+
+- set_fact:
+    service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}"
+
# Using oc_obj because oc_service doesn't seem to allow annotations
# TODO: Extend oc_service to allow annotations
- name: create ansible-service-broker service
@@ -137,6 +140,7 @@
kind: Service
metadata:
name: asb
+ namespace: openshift-ansible-service-broker
labels:
app: openshift-ansible-service-broker
service: asb
@@ -231,6 +235,20 @@
value: /etc/ansible-service-broker/config.yaml
resources: {}
terminationMessagePath: /tmp/termination-log
+ readinessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ livenessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
- image: "{{ ansible_service_broker_etcd_image }}"
name: etcd
@@ -327,20 +345,20 @@
oc_obj:
name: ansible-service-broker
state: present
- kind: ServiceBroker
+ kind: ClusterServiceBroker
content:
path: /tmp/brokerout
data:
- apiVersion: servicecatalog.k8s.io/v1alpha1
- kind: ServiceBroker
+ apiVersion: servicecatalog.k8s.io/v1beta1
+ kind: ClusterServiceBroker
metadata:
name: ansible-service-broker
spec:
- url: http://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
+ url: https://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
authInfo:
bearer:
secretRef:
name: asb-client
namespace: openshift-ansible-service-broker
kind: Secret
- caBundle: "{{ catalog_ca.content }}"
+ caBundle: "{{ service_ca_crt }}"
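The broker now authenticates over HTTPS against the service-serving CA: the asb-client token secret is listed back and its service-ca.crt entry, already base64-encoded in the secret's data map, feeds caBundle directly. A hedged sketch of the lookup result shape implied by the set_fact above (certificate bytes truncated and illustrative):

    # asb_client_secret.results.results.0:
    #   data:
    #     service-ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...  # base64 PEM
    #     token: ...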
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
index f0a6be226..51b86fb26 100644
--- a/roles/ansible_service_broker/tasks/remove.yml
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -85,9 +85,9 @@
- name: remove secret for broker auth
oc_obj:
- name: asb-auth-secret
+ name: asb-client
namespace: openshift-ansible-service-broker
- kind: Broker
+ kind: Secret
state: absent
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
@@ -99,11 +99,17 @@
kind: ConfigMap
# TODO: Is this going to work?
+- shell: >
+ oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ register: get_apiservices
+ changed_when: no
+
- name: remove broker object from the catalog
oc_obj:
name: ansible-service-broker
state: absent
- kind: ServiceBroker
+ kind: ClusterServiceBroker
+  when: "'not found' not in get_apiservices.stdout"
- name: remove openshift-ansible-service-broker project
oc_project:
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 3e9639adf..8438e993f 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -13,3 +13,5 @@ __ansible_service_broker_registry_url: null
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: latest
+__ansible_service_broker_registry_whitelist: []
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index 9c576cb76..fc58b4fd8 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,7 +1,7 @@
---
__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
-__ansible_service_broker_image_tag: v3.6
+__ansible_service_broker_image_tag: v3.7
__ansible_service_broker_etcd_image_prefix: rhel7/
__ansible_service_broker_etcd_image_tag: latest
@@ -14,3 +14,6 @@ __ansible_service_broker_registry_url: "https://registry.access.redhat.com"
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: v3.7
+__ansible_service_broker_registry_whitelist:
+ - '.*-apb$'
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index f73f90686..5ea73568a 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -10,6 +10,15 @@
l_use_crio: "{{ openshift_use_crio | default(False) }}"
l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not l_use_crio_only
+
- name: Use Package Docker if Requested
include: package_docker.yml
when:
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 7ccab37a5..b16413f72 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -52,14 +52,6 @@
- restart docker
when: not (os_firewall_use_firewalld | default(False)) | bool
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
-
- stat: path=/etc/sysconfig/docker
register: docker_check
@@ -161,16 +153,6 @@
- set_fact:
docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-- name: Check for credentials file for registry auth
- stat:
- path: "{{ docker_cli_auth_config_path }}/config.json"
- when: oreg_auth_user is defined
- register: docker_cli_auth_credentials_stat
-
-- name: Create credentials for docker cli registry auth
- command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+- include: registry_auth.yml
- meta: flush_handlers
diff --git a/roles/docker/tasks/registry_auth.yml b/roles/docker/tasks/registry_auth.yml
new file mode 100644
index 000000000..65ed60efa
--- /dev/null
+++ b/roles/docker/tasks/registry_auth.yml
@@ -0,0 +1,12 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ docker_cli_auth_config_path }}/config.json"
+ when: oreg_auth_user is defined
+ register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+ command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
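For reference, a hedged example of the inventory variables this task file consumes (names are taken from the tasks above; values are placeholders, and oreg_host plus docker_cli_auth_config_path are expected to come from role defaults):

    oreg_auth_user: registry_user
    oreg_auth_password: "{{ vaulted_registry_password }}"
    oreg_auth_credentials_replace: true   # force a fresh docker login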
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index a79600930..13bbd359e 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -82,36 +82,10 @@
enabled: yes
state: restarted
-
-- block:
-
- - name: Add http_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?http_proxy[:=]{1}"
- line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
- when:
- - openshift.common.http_proxy is defined
- - openshift.common.http_proxy != ''
-
- - name: Add https_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?https_proxy[:=]{1}"
- line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
- when:
- - openshift.common.https_proxy is defined
- - openshift.common.https_proxy != ''
-
- - name: Add no_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?no_proxy[:=]{1}"
- line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
- when:
- - openshift.common.no_proxy is defined
- - openshift.common.no_proxy != ''
-
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- block:
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 15c6a55db..726e8ada7 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -68,38 +68,10 @@
retries: 3
delay: 30
-
-# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
-# regexp: the line starts with or without #, followed by the string
-# http_proxy, then either : or =
-- block:
-
- - name: Add http_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?http_proxy[:=]{1}"
- line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
- when:
- - openshift.common.http_proxy is defined
- - openshift.common.http_proxy != ''
-
- - name: Add https_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?https_proxy[:=]{1}"
- line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
- when:
- - openshift.common.https_proxy is defined
- - openshift.common.https_proxy != ''
-
- - name: Add no_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?no_proxy[:=]{1}"
- line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
- when:
- - openshift.common.no_proxy is defined
- - openshift.common.no_proxy != ''
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- block:
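Both runtimes now delegate atomic.conf proxy handling to the shared openshift_atomic role. Judging from the blocks deleted above, tasks/proxy.yml presumably carries the same three lineinfile edits; a condensed sketch of that inference:

    # Sketch of roles/openshift_atomic/tasks/proxy.yml, inferred from the
    # removed tasks: one lineinfile per proxy setting.
    - name: Add {{ item }} to /etc/atomic.conf
      lineinfile:
        dest: /etc/atomic.conf
        regexp: "^#?{{ item }}[:=]{1}"
        line: "{{ item }}: {{ openshift.common[item] | default('') }}"
      when:
        - openshift.common[item] is defined
        - openshift.common[item] != ''
      with_items:
        - http_proxy
        - https_proxy
        - no_proxy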
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 024479fb4..9a6951920 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -2,6 +2,11 @@
- set_fact:
l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
register: pull_result
diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md
new file mode 100644
index 000000000..7b618f902
--- /dev/null
+++ b/roles/kuryr/README.md
@@ -0,0 +1,38 @@
+## OpenStack Kuryr
+
+Install the Kuryr CNI components (kuryr-controller, kuryr-cni) on master and
+worker nodes. Kuryr uses the OpenStack Networking service (Neutron) to provide
+networking for pods, which allows interconnectivity between pods and OpenStack VMs.
+
+## Requirements
+
+* Ansible 2.2+
+* CentOS/RHEL 7.3+
+
+## Current Kuryr restrictions when used with OpenShift
+
+* OpenShift Origin only
+* OpenShift on OpenStack Newton or newer (only with Trunk ports)
+
+## Key Ansible inventory Kuryr master configuration parameters
+
+* ``openshift_use_kuryr=True``
+* ``openshift_use_openshift_sdn=False``
+* ``openshift_sdn_network_plugin_name='cni'``
+* ``kuryr_cni_link_interface=eth0``
+* ``kuryr_openstack_auth_url=keystone_url``
+* ``kuryr_openstack_user_domain_name=Default``
+* ``kuryr_openstack_user_project_name=Default``
+* ``kuryr_openstack_project_id=project_uuid``
+* ``kuryr_openstack_username=kuryr``
+* ``kuryr_openstack_password=kuryr_pass``
+* ``kuryr_openstack_pod_sg_id=pod_security_group_uuid``
+* ``kuryr_openstack_pod_subnet_id=pod_subnet_uuid``
+* ``kuryr_openstack_pod_service_id=service_subnet_uuid``
+* ``kuryr_openstack_pod_project_id=pod_project_uuid``
+* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid``
+
+## Kuryr resources
+
+* [Kuryr documentation](https://docs.openstack.org/kuryr-kubernetes/latest/)
+* [Installing Kuryr containerized](https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html)
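Pulling the parameters above together, a minimal kuryr-enabled inventory fragment might look like the following (group_vars YAML form; every UUID and URL is a placeholder for your OpenStack deployment):

    openshift_use_kuryr: true
    openshift_use_openshift_sdn: false
    openshift_sdn_network_plugin_name: cni
    kuryr_cni_link_interface: eth0
    kuryr_openstack_auth_url: https://keystone.example.com:5000/v3
    kuryr_openstack_user_domain_name: Default
    kuryr_openstack_user_project_name: Default
    kuryr_openstack_project_id: 00000000-0000-0000-0000-000000000001
    kuryr_openstack_username: kuryr
    kuryr_openstack_password: kuryr_pass
    kuryr_openstack_pod_sg_id: 00000000-0000-0000-0000-000000000002
    kuryr_openstack_pod_subnet_id: 00000000-0000-0000-0000-000000000003
    kuryr_openstack_pod_service_id: 00000000-0000-0000-0000-000000000004
    kuryr_openstack_pod_project_id: 00000000-0000-0000-0000-000000000005
    kuryr_openstack_worker_nodes_subnet_id: 00000000-0000-0000-0000-000000000006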
diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml
new file mode 100644
index 000000000..ff298dda0
--- /dev/null
+++ b/roles/kuryr/defaults/main.yaml
@@ -0,0 +1,72 @@
+---
+# Kuryr conf directory
+kuryr_config_dir: /etc/kuryr
+
+# Kuryr username
+kuryr_openstack_username: kuryr
+
+# Kuryr username domain
+kuryr_openstack_user_domain_name: default
+
+# Kuryr project domain
+kuryr_openstack_project_domain_name: default
+
+# Kuryr OpenShift namespace
+kuryr_namespace: kube-system
+
+# Whether to run the cni plugin in debug mode
+kuryr_cni_debug: "false"
+
+# The version of cni binaries
+cni_version: v0.5.2
+
+# Path to bin dir (where kuryr execs get installed)
+bin_dir: /usr/bin
+
+# Path to the cni binaries
+cni_bin_dir: /opt/cni/bin
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"
+cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9"
+
+# Kuryr ClusterRole definition
+kuryr_clusterrole:
+ name: kuryrctl
+ state: present
+ rules:
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - get
+ - list
+ - watch
+ resources:
+ - daemonsets
+ - deployments
+ - deploymentconfigs
+ - endpoints
+ - ingress
+ - nodes
+ - namespaces
+ - pods
+ - projects
+ - routes
+ - services
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - update
+ - patch
+ resources:
+ - endpoints
+ - ingress
+ - pods
+ - namespaces
+ - nodes
+ - services
+ - services/status
+ - routes
diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml
new file mode 100644
index 000000000..7fd5adf41
--- /dev/null
+++ b/roles/kuryr/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Red Hat
+ description: Kuryr networking
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
new file mode 100644
index 000000000..55ab16f74
--- /dev/null
+++ b/roles/kuryr/tasks/master.yaml
@@ -0,0 +1,52 @@
+---
+- name: Perform OpenShift ServiceAccount config
+ include: serviceaccount.yaml
+
+- name: Create kuryr manifests tempdir
+ command: mktemp -d
+ register: manifests_tmpdir
+
+- name: Create kuryr ConfigMap manifest
+ become: yes
+ template:
+ src: configmap.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Create kuryr-controller Deployment manifest
+ become: yes
+ template:
+ src: controller-deployment.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Create kuryr-cni DaemonSet manifest
+ become: yes
+ template:
+ src: cni-daemonset.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+
+- name: Apply ConfigMap manifest
+ oc_obj:
+ state: present
+ kind: ConfigMap
+ name: "kuryr-config"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Apply Controller Deployment manifest
+ oc_obj:
+ state: present
+ kind: Deployment
+ name: "kuryr-controller"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Apply kuryr-cni DaemonSet manifest
+ oc_obj:
+ state: present
+ kind: DaemonSet
+ name: "kuryr-cni-ds"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
new file mode 100644
index 000000000..ffe814713
--- /dev/null
+++ b/roles/kuryr/tasks/node.yaml
@@ -0,0 +1,48 @@
+---
+- name: Create CNI bin directory
+ file:
+ state: directory
+ path: "{{ cni_bin_dir }}"
+ mode: 0755
+ owner: root
+ group: root
+ recurse: yes
+
+- name: Create CNI extraction tempdir
+ command: mktemp -d
+ register: cni_tmpdir
+
+- name: Download CNI
+ get_url:
+ url: "{{ cni_bin_url }}"
+ checksum: "sha1:{{ cni_bin_checksum }}"
+ mode: 0644
+ dest: "{{ cni_tmpdir.stdout }}"
+ register: downloaded_tarball
+
+- name: Extract CNI
+ become: yes
+ unarchive:
+ remote_src: True
+ src: "{{ downloaded_tarball.dest }}"
+ dest: "{{ cni_bin_dir }}"
+ when: downloaded_tarball.changed
+
+- name: Ensure CNI net.d exists
+ file:
+ path: /etc/cni/net.d
+ recurse: yes
+ state: directory
+
+- name: Configure OpenShift node with disabled service proxy
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ regexp: '^OPTIONS="?(.*?)"?$'
+ backrefs: yes
+ backup: yes
+ line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+
+- name: force node restart to disable the proxy
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
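The backrefs edit above appends to whatever OPTIONS value the node sysconfig already carries, for example:

    # /etc/sysconfig/origin-node, before and after (illustrative):
    # OPTIONS="--loglevel=2"
    # OPTIONS="--loglevel=2 --disable dns,proxy,plugins"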
diff --git a/roles/kuryr/tasks/serviceaccount.yaml b/roles/kuryr/tasks/serviceaccount.yaml
new file mode 100644
index 000000000..088f13091
--- /dev/null
+++ b/roles/kuryr/tasks/serviceaccount.yaml
@@ -0,0 +1,31 @@
+---
+- name: Create Controller service account
+ oc_serviceaccount:
+ name: kuryr-controller
+ namespace: "{{ kuryr_namespace }}"
+ register: saout
+
+- name: Create a role for the Kuryr
+ oc_clusterrole: "{{ kuryr_clusterrole }}"
+
+- name: Fetch the created Kuryr controller cluster role
+ oc_clusterrole:
+ name: kuryrctl
+ state: list
+ register: crout
+
+- name: Grant Kuryr the privileged security context constraints
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+
+- name: Assign role to Kuryr service account
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: cluster-role
+ resource_name: "{{ crout.results.results.metadata.name }}"
+ state: present
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
new file mode 100644
index 000000000..39348ae90
--- /dev/null
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -0,0 +1,53 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kuryr-cni-ds
+ namespace: {{ kuryr_namespace }}
+ labels:
+ tier: node
+ app: kuryr
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: kuryr
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: kuryr-controller
+ containers:
+ - name: kuryr-cni
+ image: kuryr/cni:latest
+ imagePullPolicy: IfNotPresent
+ command: [ "cni_ds_init" ]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: bin
+ mountPath: /opt/cni/bin
+ - name: net-conf
+ mountPath: /etc/cni/net.d
+ - name: config-volume
+ mountPath: /tmp/kuryr/kuryr.conf
+ subPath: kuryr-cni.conf
+ - name: etc
+ mountPath: /etc
+ volumes:
+ - name: bin
+ hostPath:
+ path: {{ cni_bin_dir }}
+ - name: net-conf
+ hostPath:
+ path: /etc/cni/net.d
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ - name: etc
+ hostPath:
+      path: /etc
\ No newline at end of file
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
new file mode 100644
index 000000000..e874d6c25
--- /dev/null
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -0,0 +1,343 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kuryr-config
+ namespace: {{ kuryr_namespace }}
+data:
+ kuryr.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # DEPRECATED: If set to false, the logging level will be set to WARNING instead
+ # of the default INFO level. (boolean value)
+ # This option is deprecated for removal.
+ # Its value may be silently ignored in the future.
+ #verbose = true
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/kuryr-controller.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ #use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = eth0
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+    # User's domain name (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+    username = {{ kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+ kuryr-cni.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/cni.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = {{ kuryr_cni_link_interface }}
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # The token to talk to the k8s API
+ token_file = /etc/kuryr/token
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /etc/kuryr/ca.crt
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2
new file mode 100644
index 000000000..d970270b5
--- /dev/null
+++ b/roles/kuryr/templates/controller-deployment.yaml.j2
@@ -0,0 +1,40 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ namespace: {{ kuryr_namespace }}
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ spec:
+ serviceAccountName: kuryr-controller
+ automountServiceAccountToken: true
+ hostNetwork: true
+ containers:
+ - image: kuryr/controller:latest
+ imagePullPolicy: IfNotPresent
+ name: controller
+ terminationMessagePath: "/dev/termination-log"
+ # FIXME(dulek): This shouldn't be required, but without it selinux is
+ # complaining about access to kuryr.conf.
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: "/etc/kuryr/kuryr.conf"
+ subPath: kuryr.conf
+ volumes:
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ defaultMode: 0666
+ restartPolicy: Always
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
new file mode 100644
index 000000000..8c10c9991
--- /dev/null
+++ b/roles/openshift_atomic/README.md
@@ -0,0 +1,28 @@
+OpenShift Atomic
+================
+
+This role houses Atomic Host-specific tasks.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+None. The proxy tasks read the `openshift.common.http_proxy`,
+`openshift.common.https_proxy`, and `openshift.common.no_proxy` facts.
+
+Dependencies
+------------
+
+- lib_openshift (declared in this role's `meta/main.yml`)
+
+Example Playbook
+----------------
+
+```
+- name: Ensure atomic proxies are defined
+ hosts: localhost
+ roles:
+ - role: openshift_atomic
+```
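+
+Since this role ships its tasks in `tasks/proxy.yml` (there is no
+`tasks/main.yml`), the proxy tasks are usually pulled in explicitly; a
+minimal sketch:
+
+```
+- name: Ensure atomic proxies are defined
+  hosts: localhost
+  tasks:
+  - include_role:
+      name: openshift_atomic
+      tasks_from: proxy
+```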
+
+License
+-------
+
+Apache License Version 2.0
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
new file mode 100644
index 000000000..ea129f514
--- /dev/null
+++ b/roles/openshift_atomic/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: Atomic related tasks
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/openshift_atomic/tasks/proxy.yml
new file mode 100644
index 000000000..dde099984
--- /dev/null
+++ b/roles/openshift_atomic/tasks/proxy.yml
@@ -0,0 +1,32 @@
+---
+# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
+# regexp: the line starts with or without #, followed by the option name
+# (http_proxy, https_proxy, or no_proxy), then either : or =
+- block:
+
+ - name: Add http_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?http_proxy[:=]{1}"
+ line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
+ when:
+ - openshift.common.http_proxy is defined
+ - openshift.common.http_proxy != ''
+
+ - name: Add https_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?https_proxy[:=]{1}"
+ line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
+ when:
+ - openshift.common.https_proxy is defined
+ - openshift.common.https_proxy != ''
+
+ - name: Add no_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?no_proxy[:=]{1}"
+ line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
+ when:
+ - openshift.common.no_proxy is defined
+ - openshift.common.no_proxy != ''
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 9e61805f9..14d8a3325 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,9 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
+ l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool }}"
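+# Either condition below means the CLI image must be handled via 'atomic'
+# (system container storage) rather than a plain docker pull.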
+- set_fact:
+ l_use_cli_atomic_image: "{{ l_use_crio_only or l_is_system_container_image }}"
- name: Install clients
package: name={{ openshift.common.service_type }}-clients state=present
@@ -20,23 +23,23 @@
backend: "docker"
when:
- openshift.common.is_containerized | bool
- - not l_use_crio
+ - not l_use_cli_atomic_image | bool
- block:
- name: Pull CLI Image
command: >
- atomic pull --storage ostree {{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ atomic pull --storage ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
- image: "{{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}"
+ image: "{{ '' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "atomic"
when:
- openshift.common.is_containerized | bool
- - l_use_crio
+ - l_use_cli_atomic_image | bool
- name: Reload facts to pick up installed OpenShift version
openshift_facts:
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index 64c7cd019..5ed6d9f84 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -125,10 +125,11 @@ fi ) &
if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
--machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
- --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ 'ocp-bootstrap,' if (node_group.bootstrap | default(False)) else '' }}{{ node_group.tags }}" \
--boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
--scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
- --image "${image}" ${metadata}
+ --image "{{ node_group.image | default('${image}') }}" ${metadata} \
+ --metadata "bootstrap={{ node_group.bootstrap | default(False) | bool | to_json }},cluster-id={{ openshift_gcp_prefix + openshift_gcp_clusterid }},node-group={{ node_group.name }}"
else
echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
fi
@@ -312,8 +313,12 @@ fi
# wait until all node groups are stable
{% for node_group in openshift_gcp_node_group_config %}
+{% if node_group.bootstrap | default(False) %}
+# not waiting for {{ node_group.name }} due to bootstrapping
+{% else %}
# wait for stable {{ node_group.name }}
( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
+{% endif %}
{% endfor %}
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 326176273..3ee3b132c 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -101,7 +101,8 @@ class ActionModule(ActionBase):
execute_module=self._execute_module,
tmp=tmp,
task_vars=task_vars,
- want_full_results=want_full_results
+ want_full_results=want_full_results,
+ templar=self._templar
)
return known_checks
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index ce05b44a4..b7b16e0ea 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -65,12 +65,15 @@ class OpenShiftCheck(object):
If the check can gather logs, tarballs, etc., do so when True; but no need to spend
the time if they're not wanted (won't be written to output directory).
"""
-
- def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # pylint: disable=too-many-arguments
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False,
+ templar=None):
# store a method for executing ansible modules from the check
self._execute_module = execute_module
# the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
+ # We may need to template some task_vars
+ self._templar = templar
self.tmp = tmp
# a boolean for disabling the gathering of results (files, computations) that won't
# actually be recorded/used
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 7c8ac78fe..5beb20503 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -61,10 +61,15 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# for the oreg_url registry there may be credentials specified
components = self.get_var("oreg_url", default="").split('/')
self.registries["oreg"] = "" if len(components) < 3 else components[0]
+
+ # Retrieve and template registry credentials, if provided
self.skopeo_command_creds = ""
oreg_auth_user = self.get_var('oreg_auth_user', default='')
oreg_auth_password = self.get_var('oreg_auth_password', default='')
if oreg_auth_user != '' and oreg_auth_password != '':
+ if self._templar is not None:
+ oreg_auth_user = self._templar.template(oreg_auth_user)
+ oreg_auth_password = self._templar.template(oreg_auth_password)
self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
# record whether we could reach a registry or not (and remember results)
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 69eb9283d..0ea34faf2 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -44,23 +44,23 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_curator_run_timezone`: The timezone that Curator uses for figuring out its run time. Defaults to 'UTC'.
- `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'.
- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
-- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
+- `openshift_logging_curator_cpu_request`: The minimum amount of CPU to allocate to Curator. Default is '100m'.
- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
- `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the curator pod will land.
- `openshift_logging_image_pull_secret`: The name of an existing pull secret to link to the logging service accounts
- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
-- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'.
-- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
- `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect.
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
-- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
+- `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'.
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
- `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
@@ -80,7 +80,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'.
- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
-- `openshift_logging_es_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
+- `openshift_logging_es_cpu_request`: The minimum amount of CPU to allocate for an ES pod cluster member. Defaults to 1 CPU.
- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'.
- `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'.
- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
@@ -107,7 +107,7 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert
- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key
- `openshift_logging_es_ops_cluster_size`: 1
-- `openshift_logging_es_ops_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
+- `openshift_logging_es_ops_cpu_request`: The minimum amount of CPU to allocate for an ES ops pod cluster member. Defaults to 1 CPU.
- `openshift_logging_es_ops_memory_limit`: 8Gi
- `openshift_logging_es_ops_pvc_dynamic`: False
- `openshift_logging_es_ops_pvc_size`: ""
@@ -115,9 +115,9 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
-- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_ops_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
-- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_ops_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
@@ -176,7 +176,7 @@ Elasticsearch OPS too, if using an OPS cluster:
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
-- `openshift_logging_mux_cpu_limit`: 100m
+- `openshift_logging_mux_cpu_request`: 100m
- `openshift_logging_mux_memory_limit`: 512Mi
- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
first value in the list is the namespace to use for undefined projects,
@@ -225,3 +225,78 @@ The corresponding openshift\_logging\_mux\_* parameters are below.
- `openshift_logging_mux_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message
- `openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false`
- `openshift_logging_mux_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message
+
+Image update procedure
+----------------------
+Upgrading the logging stack from an older version to a newer one is an automated process; perform it by running the appropriate Ansible playbook with the required Ansible variables set in your inventory, as documented at https://docs.openshift.org/.
+
+The following text describes how to manually update the logging images without a version upgrade. To determine the current version of the images in use, run:
+```
+oc describe pod | grep 'Image ID:'
+```
+This returns the repo digest, which can later be compared against the inspected image details.
+
+To determine when your image was last updated:
+```
+$ docker images
+REPOSITORY TAG IMAGE ID CREATED SIZE
+<registry>/openshift3/logging-fluentd v3.7 ff2e249fc45a About an hour ago 235.2 MB
+
+$ docker inspect ff2e249fc45a
+[
+ {
+ . . .
+ "RepoDigests": [
+ "<registry>/openshift3/logging-fluentd@sha256:4346f0aa9694f32735115705ad324803b1a6ff08343c3288f7a62c3a5cb70495"
+ ],
+ . . .
+ "Config": {
+ . . .
+ "Labels": {
+ . . .
+ "build-date": "2017-10-12T14:38:22.414827",
+ . . .
+ "release": "0.143.3.0",
+ . . .
+ "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/openshift3/logging-fluentd/images/v3.7.0-0.143.3.0",
+ . . .
+ "version": "v3.7.0"
+ }
+ },
+ . . .
+```
+
+Pull the image to check whether the registry has a newer image with the same tag:
+```
+$ docker pull <registry>/openshift3/logging-fluentd:v3.7
+```
+
+If there was an update, you need to run `docker pull` on each node.
+
+It is recommended that you now rerun the `openshift_logging` playbook to ensure that any necessary config changes are also picked up.
+
+To manually redeploy your pods, do one of the following:
+- for a DC, run:
+```
+oc rollout latest <dc_name>
+```
+
+- for an RC, scale down and then scale back up:
+```
+oc scale --replicas=0 <rc_name>
+
+... wait for scale down
+
+oc scale --replicas=<original_replica_count> <rc_name>
+```
+
+- for a DS, delete the pods, or unlabel and relabel the node (see the sketch after the command below):
+```
+oc delete pod --selector=<ds_selector>
+```
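+
+or unlabel and then relabel the node; a sketch assuming the role's default
+Fluentd node selector (`logging-infra-fluentd=true`):
+```
+oc label node <node_name> logging-infra-fluentd-
+oc label node <node_name> logging-infra-fluentd=true
+```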
+
+Changelog
+---------
+
+Tue Oct 10, 2017
+- Default imagePullPolicy changed from Always to IfNotPresent
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 6e7e2557f..626732d16 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -18,20 +18,24 @@ openshift_logging_curator_run_minute: 0
openshift_logging_curator_run_timezone: UTC
openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
-openshift_logging_curator_cpu_limit: 100m
-openshift_logging_curator_memory_limit: null
+openshift_logging_curator_cpu_limit: null
+openshift_logging_curator_memory_limit: 256Mi
+openshift_logging_curator_cpu_request: 100m
openshift_logging_curator_nodeselector: {}
-openshift_logging_curator_ops_cpu_limit: 100m
-openshift_logging_curator_ops_memory_limit: null
+openshift_logging_curator_ops_cpu_limit: null
+openshift_logging_curator_ops_memory_limit: 256Mi
+openshift_logging_curator_ops_cpu_request: 100m
openshift_logging_curator_ops_nodeselector: {}
openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
+openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
openshift_logging_kibana_proxy_memory_limit: 256Mi
+openshift_logging_kibana_proxy_cpu_request: 100m
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
@@ -53,9 +57,11 @@ openshift_logging_kibana_ca: ""
openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
+openshift_logging_kibana_ops_cpu_request: 100m
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
openshift_logging_kibana_ops_proxy_memory_limit: 256Mi
+openshift_logging_kibana_ops_proxy_cpu_request: 100m
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -71,13 +77,14 @@ openshift_logging_kibana_ops_key: ""
openshift_logging_kibana_ops_ca: ""
openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
-openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_cpu_limit: null
openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_cpu_request: 100m
openshift_logging_fluentd_journal_source: ""
openshift_logging_fluentd_journal_read_from_head: ""
openshift_logging_fluentd_hosts: ['--all']
-openshift_logging_fluentd_buffer_queue_limit: 1024
-openshift_logging_fluentd_buffer_size_limit: 1m
+openshift_logging_fluentd_buffer_queue_limit: 32
+openshift_logging_fluentd_buffer_size_limit: 8m
openshift_logging_es_host: logging-es
openshift_logging_es_port: 9200
@@ -85,7 +92,8 @@ openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: 1
-openshift_logging_es_cpu_limit: 1000m
+openshift_logging_es_cpu_limit: null
+openshift_logging_es_cpu_request: "1"
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "8Gi"
@@ -98,8 +106,6 @@ openshift_logging_es_storage_group: "65534"
openshift_logging_es_nodeselector: {}
# openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
openshift_logging_es_config: {}
-openshift_logging_es_number_of_shards: 1
-openshift_logging_es_number_of_replicas: 0
# for exposing es to external (outside of the cluster) clients
openshift_logging_es_allow_external: False
@@ -126,8 +132,9 @@ openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}"
-openshift_logging_es_ops_cpu_limit: 1000m
-openshift_logging_es_ops_memory_limit: "8Gi"
+openshift_logging_es_ops_cpu_limit: null
+openshift_logging_es_ops_memory_limit: 8Gi
+openshift_logging_es_ops_cpu_request: "1"
openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}"
@@ -160,8 +167,9 @@ openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
-openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 1Gi
+openshift_logging_mux_cpu_limit: null
+openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_cpu_request: 100m
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
openshift_logging_mux_default_namespaces: ["mux-undefined"]
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 330e7e59a..959573635 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -17,6 +17,22 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
return dict(kind='emptydir')
+def walk(source, path, default, delimiter='.'):
+    '''Walk the source hash along the given path and return the value, or the default if not found'''
+ if not isinstance(source, dict):
+ raise RuntimeError('The source is not a walkable dict: {} path: {}'.format(source, path))
+ keys = path.split(delimiter)
+ max_depth = len(keys)
+ cur_depth = 0
+ while cur_depth < max_depth:
+ if keys[cur_depth] in source:
+ source = source[keys[cur_depth]]
+ cur_depth = cur_depth + 1
+ else:
+ return default
+ return source
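+
+# Illustrative usage:
+#   walk({'foo': {'bar': 1}}, 'foo.bar', 0)                -> 1
+#   walk({'foo': {'bar': 1}}, 'foo#bar', 0, delimiter='#') -> 1
+#   walk({'foo': {}}, 'foo.bar', 0)                        -> 0 (the default)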
+
+
def random_word(source_alpha, length):
''' Returns a random word given the source of characters to pick from and resulting length '''
return ''.join(random.choice(source_alpha) for i in range(length))
@@ -73,5 +89,6 @@ class FilterModule(object):
'map_from_pairs': map_from_pairs,
'es_storage': es_storage,
'serviceaccount_name': serviceaccount_name,
- 'serviceaccount_namespace': serviceaccount_namespace
+ 'serviceaccount_namespace': serviceaccount_namespace,
+ 'walk': walk
}
diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test
new file mode 100644
index 000000000..3ad956cca
--- /dev/null
+++ b/roles/openshift_logging/filter_plugins/test
@@ -0,0 +1,34 @@
+import unittest
+from openshift_logging import walk
+
+class TestFilterMethods(unittest.TestCase):
+
+
+ def test_walk_find_key(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+        self.assertEqual(walk(source, 'foo#bar.xyz', 123, delimiter='#'), 'myvalue')
+
+
+ def test_walk_return_default(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+        self.assertEqual(walk(source, 'foo#bar.abc', 123, delimiter='#'), 123)
+
+
+ def test_walk_limit_max_depth(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+        self.assertEqual(walk(source, 'foo#bar.abc#dontfindme', 123, delimiter='#'), 123)
+
+ def test_complex_hash(self):
+ source = {
+ 'elasticsearch': {
+ 'configmaps': {
+ 'logging-elasticsearch': {
+ 'elasticsearch.yml': "a string value"
+ }
+ }
+ }
+ }
+        self.assertEqual(walk(source, 'elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', 123, delimiter='#'), "a string value")
+
+if __name__ == '__main__':
+ unittest.main()
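+
+# Illustrative: run from roles/openshift_logging/filter_plugins, e.g.
+# "python test", assuming the filter plugin's own imports are available.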
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index f10df8da5..98d0d1c4f 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -207,7 +207,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
def facts_for_configmaps(self, namespace):
''' Gathers facts for configmaps in logging namespace '''
self.default_keys_for("configmaps")
- a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ a_list = self.oc_command("get", "configmaps", namespace=namespace)
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 21fd79c28..76627acf2 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -78,6 +78,7 @@
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
_es_containers: "{{item.0.containers}}"
+ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
@@ -133,6 +134,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
@@ -141,7 +143,10 @@
openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}"
openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}"
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
+ openshift_logging_es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards | default(None) }}"
+ openshift_logging_es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas | default(None) }}"
_es_containers: "{{item.0.containers}}"
+ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
@@ -167,6 +172,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
@@ -207,11 +213,13 @@
openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
+ openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}"
openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}"
openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
@@ -243,6 +251,7 @@
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
+ openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}"
openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
when:
diff --git a/roles/openshift_logging/templates/jks_pod.j2 b/roles/openshift_logging/templates/jks_pod.j2
index 8b1c74211..e4110b7b3 100644
--- a/roles/openshift_logging/templates/jks_pod.j2
+++ b/roles/openshift_logging/templates/jks_pod.j2
@@ -8,7 +8,7 @@ spec:
containers:
- name: jks-cert-gen
image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
command: ["sh", "{{generated_certs_dir}}/generate-jks.sh"]
securityContext:
privileged: true
diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml
index 17807b644..9cae9f936 100644
--- a/roles/openshift_logging_curator/defaults/main.yml
+++ b/roles/openshift_logging_curator/defaults/main.yml
@@ -9,8 +9,9 @@ openshift_logging_curator_namespace: logging
### Common settings
openshift_logging_curator_nodeselector: ""
-openshift_logging_curator_cpu_limit: 100m
-openshift_logging_curator_memory_limit: null
+openshift_logging_curator_cpu_limit: null
+openshift_logging_curator_cpu_request: 100m
+openshift_logging_curator_memory_limit: 256Mi
openshift_logging_curator_es_host: "logging-es"
openshift_logging_curator_es_port: 9200
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 6e8fab2b5..b4ddf45d9 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -90,6 +90,7 @@
es_host: "{{ openshift_logging_curator_es_host }}"
es_port: "{{ openshift_logging_curator_es_port }}"
curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
+ curator_cpu_request: "{{ openshift_logging_curator_cpu_request }}"
curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index e74918a40..462128366 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -38,14 +38,27 @@ spec:
-
name: "curator"
image: {{image}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
resources:
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %}
limits:
+{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %}
cpu: "{{curator_cpu_limit}}"
-{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
requests:
+{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %}
+ cpu: "{{curator_cpu_request}}"
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
{% endif %}
env:
-
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index fc48b7f71..9fc6fd1d8 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -6,7 +6,8 @@ openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_
openshift_logging_elasticsearch_namespace: logging
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}"
-openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('1000m') }}"
+openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('') }}"
+openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_cpu_request | default('1000m') }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}"
openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}"
@@ -40,7 +41,7 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_
# config the es plugin to write kibana index based on the index mode
openshift_logging_elasticsearch_kibana_index_mode: 'unique'
-openshift_logging_elasticsearch_proxy_cpu_limit: "100m"
+openshift_logging_elasticsearch_proxy_cpu_request: "100m"
openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"
openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index aeff2d198..7aabdc861 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -164,13 +164,17 @@
when: es_logging_contents is undefined
changed_when: no
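+# Pull the shard/replica counts out of an existing logging-elasticsearch
+# configmap, if present, so a re-run preserves the current settings.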
+- set_fact:
+ __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}"
+ __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}"
+
- template:
src: elasticsearch.yml.j2
dest: "{{ tempdir }}/elasticsearch.yml"
vars:
allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}"
es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
when: es_config_contents is undefined
@@ -349,7 +353,8 @@
deploy_name: "{{ es_deploy_name }}"
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
- es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
+ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}"
+ es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}"
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index ce3b2eb83..0c7d8b46e 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -41,7 +41,7 @@ spec:
containers:
- name: proxy
image: {{ proxy_image }}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
args:
- --upstream-ca=/etc/elasticsearch/secret/admin-ca
- --https-address=:4443
@@ -69,21 +69,22 @@ spec:
readOnly: true
resources:
limits:
- cpu: "{{openshift_logging_elasticsearch_proxy_cpu_limit }}"
memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
requests:
+ cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}"
memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
-
name: "elasticsearch"
image: {{image}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
resources:
limits:
- memory: "{{es_memory_limit}}"
-{% if es_cpu_limit is defined and es_cpu_limit is not none %}
+{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %}
cpu: "{{es_cpu_limit}}"
{% endif %}
+ memory: "{{es_memory_limit}}"
requests:
+ cpu: "{{es_cpu_request}}"
memory: "{{es_memory_limit}}"
{% if es_container_security_context %}
securityContext: {{ es_container_security_context | to_yaml }}
diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
index c87d48e27..2fd960bb5 100644
--- a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
-__openshift_logging_elasticsearch_proxy_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
__openshift_logging_elasticsearch_proxy_image_version: "v3.7"
diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md
index da313d68b..611bdaee0 100644
--- a/roles/openshift_logging_eventrouter/README.md
+++ b/roles/openshift_logging_eventrouter/README.md
@@ -3,9 +3,9 @@ Event router
A pod forwarding kubernetes events to EFK aggregated logging stack.
-- **eventrouter** is deployed to logging project, has a service account and its own role to read events
+- **eventrouter** is deployed to the default project, has a service account and its own role to read events
- **eventrouter** watches kubernetes events, marshalls them to JSON and outputs to its sink, currently only various formatting to STDOUT
-- **fluentd** picks them up and inserts to elasticsearch *.operations* index
+- **fluentd** ingests the logs from the **eventrouter** container (as it would from any other container) and writes them to the appropriate index for the **eventrouter**'s namespace (in the 'default' namespace, the *.operations* index is used)
- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled.
@@ -15,6 +15,6 @@ Configuration variables:
- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'.
- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter, supported 'stdout' and 'glog'. Defaults to 'stdout'.
- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
-- `openshift_logging_eventrouter_cpu_limit`: The amount of CPU to allocate to eventrouter. Defaults to '100m'.
+- `openshift_logging_eventrouter_cpu_request`: The minimum amount of CPU to allocate to eventrouter. Defaults to '100m'.
- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'.
- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'.
diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml
index 34e33f75f..4c0350c98 100644
--- a/roles/openshift_logging_eventrouter/defaults/main.yaml
+++ b/roles/openshift_logging_eventrouter/defaults/main.yaml
@@ -4,6 +4,7 @@ openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version
openshift_logging_eventrouter_replicas: 1
openshift_logging_eventrouter_sink: stdout
openshift_logging_eventrouter_nodeselector: ""
-openshift_logging_eventrouter_cpu_limit: 100m
+openshift_logging_eventrouter_cpu_limit: null
+openshift_logging_eventrouter_cpu_request: 100m
openshift_logging_eventrouter_memory_limit: 128Mi
openshift_logging_eventrouter_namespace: default
diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
index 91708e54b..cc01c010d 100644
--- a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
+++ b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
@@ -56,7 +56,7 @@ objects:
containers:
- name: kube-eventrouter
image: ${IMAGE}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
resources:
limits:
memory: ${MEMORY}
diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
index 8df7435e2..cbbc6a8ec 100644
--- a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
@@ -45,7 +45,7 @@
params:
IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}"
REPLICAS: "{{ openshift_logging_eventrouter_replicas }}"
- CPU: "{{ openshift_logging_eventrouter_cpu_limit }}"
+ CPU: "{{ openshift_logging_eventrouter_cpu_request }}"
MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}"
NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}"
SINK: "{{ openshift_logging_eventrouter_sink }}"
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index ea1fd3efd..5a4f7f762 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -25,7 +25,7 @@ objects:
metadata:
name: logging-eventrouter
data:
- config.json: |-
+ config.json: |-
{
"sink": "${SINK}"
}
@@ -62,12 +62,12 @@ objects:
containers:
- name: kube-eventrouter
image: ${IMAGE}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
resources:
limits:
- memory: ${MEMORY}
- cpu: ${CPU}
+ memory: ${MEMORY}
requests:
+ cpu: ${CPU}
memory: ${MEMORY}
volumeMounts:
- name: config-volume
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 25f7580a4..861935c99 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -8,7 +8,8 @@ openshift_logging_fluentd_namespace: logging
### Common settings
openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
-openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_cpu_limit: null
+openshift_logging_fluentd_cpu_request: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_hosts: ['--all']
@@ -55,7 +56,7 @@ openshift_logging_fluentd_aggregating_passphrase: none
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
-openshift_logging_fluentd_file_buffer_limit: 1Gi
+openshift_logging_fluentd_file_buffer_limit: 256Mi
# Configure fluentd to tail audit log file and filter out container engine's logs from there
# These logs are then stored in ES operation index
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 06bb35dbc..f56810610 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -172,6 +172,9 @@
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
+ fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request }}"
+ fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"
audit_pos_log_file: "{{ openshift_logging_fluentd_audit_pos_file | default() }}"
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 644b70031..10283316c 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -29,15 +29,30 @@ spec:
containers:
- name: "{{ daemonset_container_name }}"
image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}"
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
securityContext:
privileged: true
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
resources:
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %}
limits:
- cpu: {{ openshift_logging_fluentd_cpu_limit }}
- memory: {{ openshift_logging_fluentd_memory_limit }}
+{% if fluentd_cpu_limit is not none %}
+ cpu: "{{fluentd_cpu_limit}}"
+{% endif %}
+{% if fluentd_memory_limit is not none %}
+ memory: "{{fluentd_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
requests:
- memory: {{ openshift_logging_fluentd_memory_limit }}
+{% if fluentd_cpu_request is not none %}
+ cpu: "{{fluentd_cpu_request}}"
+{% endif %}
+{% if fluentd_memory_limit is not none %}
+ memory: "{{fluentd_memory_limit}}"
+{% endif %}
+{% endif %}
+{% endif %}
volumeMounts:
- name: runlogjournal
mountPath: /run/log/journal
@@ -115,7 +130,7 @@ spec:
containerName: "{{ daemonset_container_name }}"
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
- value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}"
+ value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256i') }}"
{% if openshift_logging_mux_client_mode is defined and
((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
(openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
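The fluentd template above follows the same conditional pattern now used by the kibana and mux templates: `resources:` and its `limits:`/`requests:` children are only emitted when at least one corresponding value is set, so a null CPU limit no longer renders an empty key. As a sketch, assuming the new defaults (`fluentd_cpu_limit` null, `fluentd_cpu_request` 100m, `fluentd_memory_limit` 512Mi), the block renders roughly as:
```
resources:
  limits:
    memory: "512Mi"
  requests:
    cpu: "100m"
    memory: "512Mi"
```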
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index ee265bb14..1366e96cd 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -9,6 +9,7 @@ openshift_logging_kibana_namespace: logging
openshift_logging_kibana_nodeselector: ""
openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
@@ -28,6 +29,7 @@ openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix
openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_cpu_request: 100m
openshift_logging_kibana_proxy_memory_limit: 256Mi
#The absolute path on the control node to the cert file to use
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index e17e8c1f2..809f7a631 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -230,8 +230,10 @@
es_host: "{{ openshift_logging_kibana_es_host }}"
es_port: "{{ openshift_logging_kibana_es_port }}"
kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
+ kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request }}"
kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request }}"
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index da1386d3e..4ff86729a 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -37,18 +37,27 @@ spec:
-
name: "kibana"
image: {{ image }}
- imagePullPolicy: Always
-{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
+ imagePullPolicy: IfNotPresent
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
resources:
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
limits:
-{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
+{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
cpu: "{{ kibana_cpu_limit }}"
-{% endif %}
-{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
+{% endif %}
+{% endif %}
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
requests:
+{% if kibana_cpu_request is not none and kibana_cpu_request != "" %}
+ cpu: "{{ kibana_cpu_request }}"
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
env:
- name: "ES_HOST"
@@ -75,18 +84,27 @@ spec:
-
name: "kibana-proxy"
image: {{ proxy_image }}
- imagePullPolicy: Always
-{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
+ imagePullPolicy: IfNotPresent
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
resources:
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
limits:
-{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
+{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
cpu: "{{ kibana_proxy_cpu_limit }}"
-{% endif %}
-{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
+{% endif %}
+{% endif %}
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
requests:
+{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %}
+ cpu: "{{ kibana_proxy_cpu_request }}"
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
ports:
-
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 68412aec8..9de686576 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -9,10 +9,11 @@ openshift_logging_mux_namespace: logging
### Common settings
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
-openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 2Gi
-openshift_logging_mux_buffer_queue_limit: 1024
-openshift_logging_mux_buffer_size_limit: 1m
+openshift_logging_mux_cpu_limit: null
+openshift_logging_mux_cpu_request: 100m
+openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_buffer_queue_limit: 32
+openshift_logging_mux_buffer_size_limit: 8m
openshift_logging_mux_replicas: 1
@@ -57,11 +58,11 @@ openshift_logging_mux_file_buffer_storage_type: "emptydir"
openshift_logging_mux_file_buffer_pvc_name: "logging-mux-pvc"
# required if the PVC does not already exist
-openshift_logging_mux_file_buffer_pvc_size: 4Gi
+openshift_logging_mux_file_buffer_pvc_size: 1Gi
openshift_logging_mux_file_buffer_pvc_dynamic: false
openshift_logging_mux_file_buffer_pvc_pv_selector: {}
openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_mux_file_buffer_storage_group: '65534'
openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux"
-openshift_logging_mux_file_buffer_limit: 2Gi
+openshift_logging_mux_file_buffer_limit: 256Mi
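The new mux defaults are sized consistently: 32 buffer chunks of 8m each cap in-memory buffering at 256m, matching the 256Mi file buffer and fitting under the reduced 512Mi memory limit. A hedged inventory sketch restoring roughly the previous, larger sizing:
```
openshift_logging_mux_memory_limit: 2Gi
openshift_logging_mux_buffer_queue_limit: 1024
openshift_logging_mux_buffer_size_limit: 1m
openshift_logging_mux_file_buffer_limit: 2Gi
```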
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 2ec863afa..1b46a7ac3 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -171,6 +171,7 @@
ops_host: "{{ openshift_logging_mux_ops_host }}"
ops_port: "{{ openshift_logging_mux_ops_port }}"
mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
+ mux_cpu_request: "{{ openshift_logging_mux_cpu_request }}"
mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 4cc48139f..cfb13d59b 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -36,18 +36,27 @@ spec:
containers:
- name: "mux"
image: {{image}}
- imagePullPolicy: Always
-{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+ imagePullPolicy: IfNotPresent
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
resources:
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
limits:
-{% if mux_cpu_limit is not none %}
+{% if mux_cpu_limit is not none %}
cpu: "{{mux_cpu_limit}}"
-{% endif %}
-{% if mux_memory_limit is not none %}
+{% endif %}
+{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
requests:
+{% if mux_cpu_request is not none %}
+ cpu: "{{mux_cpu_request}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
ports:
- containerPort: "{{ openshift_logging_mux_port }}"
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index b6875ebd4..3da861d03 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -46,6 +46,9 @@ r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}"
+
r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml
new file mode 100644
index 000000000..f79955e95
--- /dev/null
+++ b/roles/openshift_master/tasks/journald.yml
@@ -0,0 +1,22 @@
+---
+- name: Checking for journald.conf
+ stat: path=/etc/systemd/journald.conf
+ register: journald_conf_file
+
+- name: Update journald setup
+ replace:
+ dest: /etc/systemd/journald.conf
+ regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+ replace: ' {{ item.var }}={{ item.val }}'
+ backup: yes
+ with_items: "{{ journald_vars_to_replace | default([]) }}"
+ when: journald_conf_file.stat.exists
+ register: journald_update
+
+# I need to restart journald immediately, otherwise it gets in the way during
+# further steps in ansible
+- name: Restart journald
+ systemd:
+ name: systemd-journald
+ state: restarted
+ when: journald_update | changed
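The replace task above iterates over `item.var`/`item.val` pairs, so `journald_vars_to_replace` is expected to be a list of dicts with those two keys. A minimal sketch, with illustrative values only:
```
journald_vars_to_replace:
  - { var: Storage, val: persistent }
  - { var: RateLimitInterval, val: 1s }
  - { var: RateLimitBurst, val: 10000 }
```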
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 824a5886e..d0bc79c0c 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -177,31 +177,12 @@
local_facts:
no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+- name: Update journald config
+ include: journald.yml
+
- name: Install the systemd units
include: systemd_units.yml
-- name: Checking for journald.conf
- stat: path=/etc/systemd/journald.conf
- register: journald_conf_file
-
-- name: Update journald setup
- replace:
- dest: /etc/systemd/journald.conf
- regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
- replace: ' {{ item.var }}={{ item.val }}'
- backup: yes
- with_items: "{{ journald_vars_to_replace | default([]) }}"
- when: journald_conf_file.stat.exists
- register: journald_update
-
-# I need to restart journald immediatelly, otherwise it gets into way during
-# further steps in ansible
-- name: Restart journald
- systemd:
- name: systemd-journald
- state: restarted
- when: journald_update | changed
-
- name: Install Master system container
include: system_container.yml
when:
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 91332acfb..843352532 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,4 +1,9 @@
---
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull master system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml
index f6ad438aa..2252c003a 100644
--- a/roles/openshift_master/tasks/upgrade_facts.yml
+++ b/roles/openshift_master/tasks/upgrade_facts.yml
@@ -21,6 +21,10 @@
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
when: oreg_host is not defined
+- set_fact:
+ oreg_auth_credentials_replace: False
+ when: oreg_auth_credentials_replace is not defined
+
- name: Set openshift_master_debug_level
set_fact:
openshift_master_debug_level: "{{ debug_level | default(2) }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 7ec26ceb7..3f7a528a9 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -21,7 +21,7 @@ AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
{% if 'api_env_vars' in openshift.master or 'controllers_env_vars' in openshift.master -%}
-{% for key, value in openshift.master.api_env_vars.items() | default([]) | union(openshift.master.controllers_env_vars.items() | default([])) -%}
+{% for key, value in (openshift.master.api_env_vars | default({})).items() | union((openshift.master.controllers_env_vars | default({})).items()) -%}
{{ key }}={{ value }}
{% endfor -%}
{% endif -%}
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 7159ccc7f..40775571f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -179,7 +179,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index f7f3ac2b1..a4f410296 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -363,7 +363,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
def validate(self):
''' validate this idp instance '''
- IdentityProviderOauthBase.validate(self)
if not isinstance(self.provider['claims'], dict):
raise errors.AnsibleFilterError("|failed claims for provider {0} "
"must be a dictionary".format(self.__class__.__name__))
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index ed698daca..b74f22c00 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -109,3 +109,78 @@ Author Information
------------------
Jose David Martín (j.david.nieto@gmail.com)
+
+Image update procedure
+----------------------
+An upgrade of the metrics stack from an older version to a newer one is an automated process; perform it by running the appropriate Ansible playbook with the required Ansible variables set in your inventory, as documented at https://docs.openshift.org/.
+
+The following text describes a manual update of the metrics images without a version upgrade. To determine the current version of the images being used, run:
+```
+oc describe pod | grep 'Image ID:'
+```
+This shows the repo digest, which can later be compared to the inspected image details.
+
+To determine when your image was last updated:
+```
+$ docker images
+REPOSITORY TAG IMAGE ID CREATED SIZE
+<registry>/openshift3/origin-metrics-cassandra v3.7 f8ad8d569e27 14 hours ago 783.7 MB
+
+$ docker inspect f8ad8d569e27
+[
+ {
+ . . .
+ "RepoDigests": [
+ "<registry>/openshift3/metrics-cassandra@sha256:d37fc0cab268625b53a92bb98d09fcc501cfca1c68e16bac6dd98446d32ba135
+ ],
+ . . .
+ "Config": {
+ . . .
+ "Labels": {
+ . . .
+ "build-date": "2017-10-17T16:47:44.350655",
+ . . .
+ "release": "0.143.4.0",
+ . . .
+ "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/openshift3/metrics-cassandra/images/v3.7.0-0.143.4.0",
+ . . .
+ "version": "v3.7.0"
+ }
+ },
+ . . .
+```
+
+Pull a new image to see if registry has any newer images with the same tag:
+```
+$ docker pull <registry>/openshift3/origin-metrics-cassandra:v3.7
+```
+
+If there was an update, you need to run `docker pull` on each node.
+
+It is recommended that you now rerun the `openshift_metrics` playbook to ensure that any necessary config changes are also picked up.
+
+To manually redeploy your pod you can do the following:
+- for a DC you can do:
+```
+oc rollout latest <dc_name>
+```
+
+- for an RC you can scale down and scale back up:
+```
+oc scale --replicas=0 <rc_name>
+
+... wait for scale down
+
+oc scale --replicas=<original_replica_count> <rc_name>
+```
+
+- for a DS you can delete the pods, or unlabel and relabel your node:
+```
+oc delete pod --selector=<ds_selector>
+```
+
+Changelog
+---------
+
+Tue Oct 10, 2017
+- Default imagePullPolicy changed from Always to IfNotPresent
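When rerunning the `openshift_metrics` playbook after a manual image refresh, the image coordinates come from the standard role variables used by the templates below. A hedged inventory sketch pinning them explicitly (values are examples only):
```
openshift_metrics_image_prefix: registry.access.redhat.com/openshift3/
openshift_metrics_image_version: v3.7
```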
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index 6f341bcfb..6a3811598 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -30,7 +30,7 @@ spec:
{% endif %}
containers:
- image: "{{ openshift_metrics_image_prefix }}metrics-cassandra:{{ openshift_metrics_image_version }}"
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
name: hawkular-cassandra-{{ node }}
ports:
- name: cql-port
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 59f7fb44a..0662bea53 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -25,7 +25,7 @@ spec:
{% endif %}
containers:
- image: {{openshift_metrics_image_prefix}}metrics-hawkular-metrics:{{openshift_metrics_image_version}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
name: hawkular-metrics
ports:
- name: http-endpoint
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
index d65eaf9ae..40d09e9fa 100644
--- a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
@@ -25,7 +25,7 @@ spec:
{% endif %}
containers:
- image: {{openshift_metrics_image_prefix}}metrics-hawkular-openshift-agent:{{openshift_metrics_image_version}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
name: hawkular-openshift-agent
{% if ((openshift_metrics_hawkular_agent_limits_cpu is defined and openshift_metrics_hawkular_agent_limits_cpu is not none)
or (openshift_metrics_hawkular_agent_limits_memory is defined and openshift_metrics_hawkular_agent_limits_memory is not none)
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index d8c7763ea..e732c1eee 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -27,7 +27,7 @@ spec:
containers:
- name: heapster
image: {{openshift_metrics_image_prefix}}metrics-heapster:{{openshift_metrics_image_version}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
ports:
- containerPort: 8082
name: "http-endpoint"
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index b310a8f64..b9f16dfd4 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -104,6 +104,9 @@ openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
+
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 20d7a9539..164a79b39 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,4 +1,9 @@
---
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull node system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index e09063aa5..0f73ce454 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -10,6 +10,11 @@
l_service_name: "{{ openshift.docker.service_name }}"
when: not l_use_crio
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 08e1c7f4f..718d35dca 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -44,7 +44,7 @@ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
@@ -67,9 +67,11 @@ servingInfo:
{% endfor %}
{% endif %}
volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
+{% if not (openshift_node_use_kuryr | default(False)) | bool %}
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
+{% endif %}
volumeConfig:
localQuota:
perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
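Both the master and node templates are driven by a single inventory switch. A minimal sketch (the derived variables are defined in the defaults shown above):
```
# Enables Kuryr networking; this flows into r_openshift_master_use_kuryr
# and openshift_node_use_kuryr, switches on the networkPluginName stanza,
# and suppresses the kube-proxy proxyArguments block on nodes.
openshift_use_kuryr: true
```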
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 4abe8bcaf..ef66bf9ca 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -2,9 +2,21 @@
- name: update ca trust
command: update-ca-trust
notify:
- - restart docker after updating ca trust
+ - check for container runtime after updating ca trust
-- name: restart docker after updating ca trust
+- name: check for container runtime after updating ca trust
+ command: >
+ systemctl -q is-active {{ openshift.docker.service_name }}.service
+ register: l_docker_installed
+ # An rc of 0 indicates that the container runtime service is
+ # running. We will restart it by notifying the restart handler since
+ # we have updated the system CA trust.
+ changed_when: l_docker_installed.rc == 0
+ failed_when: false
+ notify:
+ - restart container runtime after updating ca trust
+
+- name: restart container runtime after updating ca trust
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
diff --git a/roles/openshift_node_dnsmasq/README.md b/roles/openshift_node_dnsmasq/README.md
new file mode 100644
index 000000000..4596190d7
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/README.md
@@ -0,0 +1,27 @@
+OpenShift Node DNS resolver
+===========================
+
+Configure dnsmasq to act as a DNS resolver for an OpenShift node.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | Description |
+|-----------------------------------------------------|---------------|-----------------------------------------------------------------------------------|
+| openshift_node_dnsmasq_install_network_manager_hook | true | Install NetworkManager hook updating /etc/resolv.conf with local dnsmasq instance |
+
+Dependencies
+------------
+
+* openshift_common
+* openshift_node_facts
+
+License
+-------
+
+Apache License Version 2.0
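A minimal sketch of opting out of the hook, using the single variable this role exposes (note the unsupported-configuration check added elsewhere in this change will then fail unless `openshift_enable_unsupported_configurations` is also set):
```
openshift_node_dnsmasq_install_network_manager_hook: false
```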
diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml
index ed97d539c..eae832fcf 100644
--- a/roles/openshift_node_dnsmasq/defaults/main.yml
+++ b/roles/openshift_node_dnsmasq/defaults/main.yml
@@ -1 +1,2 @@
---
+openshift_node_dnsmasq_install_network_manager_hook: true
diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
index dddcfc9da..e5a92a630 100644
--- a/roles/openshift_node_dnsmasq/tasks/network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
@@ -5,5 +5,6 @@
dest: /etc/NetworkManager/dispatcher.d/
mode: 0755
notify: restart NetworkManager
+ when: openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
- meta: flush_handlers
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index c5a44bffb..92f74928c 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -17,16 +17,16 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_prometheus_namespace`: project (i.e. namespace) where the components will be
deployed.
-- `openshift_prometheus_replicas`: The number of replicas for prometheus deployment.
-
- `openshift_prometheus_node_selector`: Selector for the nodes prometheus will be deployed on.
-- `openshift_prometheus_image_<COMPONENT>`: specify image for the component
+- `openshift_prometheus_<COMPONENT>_image_prefix`: specify image prefix for the component
+
+- `openshift_prometheus_<COMPONENT>_image_version`: specify image version for the component
-## Storage related variables
-Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can set pv claim by setting corresponding role variable:
+## PVC related variables
+Each Prometheus component (prometheus, alertmanager, alertbuffer) can claim a PV by setting the corresponding role variables:
```
-openshift_prometheus_<COMPONENT>_storage_type: <VALUE>
+openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir)
openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
```
e.g
@@ -37,6 +37,29 @@ openshift_prometheus_alertbuffer_pvc_size: 10G
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
```
+## NFS PV Storage variables
+Each Prometheus component (prometheus, alertmanager, alertbuffer) can use an NFS PV by setting the corresponding variables:
+```
+openshift_prometheus_<COMPONENT>_storage_kind=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_(access_modes|host|labels)=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_volume_(name|size)=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_nfs_(directory|options)=<VALUE>
+```
+e.g.
+```
+openshift_prometheus_storage_kind=nfs
+openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+openshift_prometheus_storage_host=nfs.example.com #for external host
+openshift_prometheus_storage_nfs_directory=/exports
+openshift_prometheus_storage_alertmanager_nfs_options='*(rw,root_squash)'
+openshift_prometheus_storage_volume_name=prometheus
+openshift_prometheus_storage_alertbuffer_volume_size=10Gi
+openshift_prometheus_storage_labels={'storage': 'prometheus'}
+```
+
+NOTE: Setting `openshift_prometheus_<COMPONENT>_storage_labels` overrides `openshift_prometheus_<COMPONENT>_pvc_pv_selector`
+
+
## Additional Alert Rules file variable
An external file with alert rules can be added by setting path to additional rules variable:
```
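As a hedged sketch of what such a rules file might contain (Prometheus 2.x rule-file format; the alert name, expression, and job label are illustrative only):
```
groups:
  - name: example-rules
    rules:
      - alert: NodeDown
        expr: up{job="kubernetes-nodes"} == 0
        for: 10m
```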
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index c08bec4cb..00995eee6 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -4,32 +4,38 @@ openshift_prometheus_state: present
openshift_prometheus_namespace: prometheus
-openshift_prometheus_replicas: 1
openshift_prometheus_node_selector: {"region":"infra"}
-# images
-openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
-openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
-openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:v0.9.1"
-openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
+# image defaults
+openshift_prometheus_image_prefix: "openshift/"
+openshift_prometheus_image_version: "v2.0.0-dev.3"
+openshift_prometheus_proxy_image_prefix: "openshift/"
+openshift_prometheus_proxy_image_version: "v1.0.0"
+openshift_prometheus_alertmanager_image_prefix: "openshift/"
+openshift_prometheus_alertmanager_image_version: "v0.9.1"
+openshift_prometheus_alertbuffer_image_prefix: "openshift/"
+openshift_prometheus_alertbuffer_image_version: "v0.0.2"
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
# storage
-openshift_prometheus_storage_type: pvc
+# One of ['emptydir', 'pvc']
+openshift_prometheus_storage_type: "emptydir"
openshift_prometheus_pvc_name: prometheus
openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
-openshift_prometheus_alertmanager_storage_type: pvc
+# One of ['emptydir', 'pvc']
+openshift_prometheus_alertmanager_storage_type: "emptydir"
openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
-openshift_prometheus_alertbuffer_storage_type: pvc
+# One of ['emptydir', 'pvc']
+openshift_prometheus_alertbuffer_storage_type: "emptydir"
openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index cb75eedca..00c3c1987 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -128,6 +128,7 @@
access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_pvc_size }}"
selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+ when: openshift_prometheus_storage_type == 'pvc'
- name: create alertmanager pvc
oc_pvc:
@@ -136,6 +137,7 @@
access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+ when: openshift_prometheus_alertmanager_storage_type == 'pvc'
- name: create alertbuffer pvc
oc_pvc:
@@ -144,22 +146,23 @@
access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+ when: openshift_prometheus_alertbuffer_storage_type == 'pvc'
-# create prometheus deployment
-- name: Set prometheus deployment template
+# create prometheus stateful set
+- name: Set prometheus template
template:
- src: prometheus_deployment.j2
+ src: prometheus.j2
dest: "{{ tempdir }}/templates/prometheus.yaml"
vars:
namespace: "{{ openshift_prometheus_namespace }}"
- prom_replicas: "{{ openshift_prometheus_replicas }}"
+# prom_replicas: "{{ openshift_prometheus_replicas }}"
-- name: Set prometheus deployment
+- name: Set prometheus stateful set
oc_obj:
state: "{{ state }}"
name: "prometheus"
namespace: "{{ openshift_prometheus_namespace }}"
- kind: deployment
+ kind: statefulset
files:
- "{{ tempdir }}/templates/prometheus.yaml"
delete_after: true
diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus.j2
index 66eab6df4..916c57aa2 100644
--- a/roles/openshift_prometheus/templates/prometheus_deployment.j2
+++ b/roles/openshift_prometheus/templates/prometheus.j2
@@ -1,12 +1,14 @@
-apiVersion: extensions/v1beta1
-kind: Deployment
+apiVersion: apps/v1beta1
+kind: StatefulSet
metadata:
name: prometheus
namespace: {{ namespace }}
labels:
app: prometheus
spec:
- replicas: {{ prom_replicas|default(1) }}
+ updateStrategy:
+ type: RollingUpdate
+ podManagementPolicy: Parallel
selector:
provider: openshift
matchLabels:
@@ -27,7 +29,7 @@ spec:
containers:
# Deploy Prometheus behind an oauth proxy
- name: prom-proxy
- image: "{{ openshift_prometheus_image_proxy }}"
+ image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -60,6 +62,8 @@ spec:
- -tls-key=/etc/tls/private/tls.key
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -openshift-ca=/etc/pki/tls/cert.pem
+ - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- -skip-auth-regex=^/metrics
volumeMounts:
- mountPath: /etc/tls/private
@@ -72,9 +76,10 @@ spec:
- name: prometheus
args:
- --storage.tsdb.retention=6h
+ - --storage.tsdb.min-block-duration=2m
- --config.file=/etc/prometheus/prometheus.yml
- --web.listen-address=localhost:9090
- image: "{{ openshift_prometheus_image_prometheus }}"
+ image: "{{openshift_prometheus_image_prefix}}prometheus:{{openshift_prometheus_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -100,7 +105,7 @@ spec:
# Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
- name: alerts-proxy
- image: "{{ openshift_prometheus_image_proxy }}"
+ image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -133,6 +138,8 @@ spec:
- -tls-key=/etc/tls/private/tls.key
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -openshift-ca=/etc/pki/tls/cert.pem
+ - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
volumeMounts:
- mountPath: /etc/tls/private
name: alerts-tls
@@ -142,7 +149,7 @@ spec:
- name: alert-buffer
args:
- --storage-path=/alert-buffer/messages.db
- image: "{{ openshift_prometheus_image_alertbuffer }}"
+ image: "{{openshift_prometheus_alertbuffer_image_prefix}}prometheus-alert-buffer:{{openshift_prometheus_alertbuffer_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -169,7 +176,7 @@ spec:
- name: alertmanager
args:
- -config.file=/etc/alertmanager/alertmanager.yml
- image: "{{ openshift_prometheus_image_alertmanager }}"
+ image: "{{openshift_prometheus_alertmanager_image_prefix}}prometheus-alertmanager:{{openshift_prometheus_alertmanager_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
index e534e0cca..7c1573096 100644
--- a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
@@ -21,16 +21,22 @@
openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret
openshift_logging_kibana_hostname: openshift_hosted_logging_hostname
openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname
+ openshift_logging_kibana_nodeselector: openshift_hosted_logging_kibana_nodeselector
+ openshift_logging_kibana_ops_nodeselector: openshift_hosted_logging_kibana_ops_nodeselector
openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source
openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head
+ openshift_logging_fluentd_nodeselector: openshift_hosted_logging_fluentd_nodeselector_label
openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram
openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector
+ openshift_logging_es_ops_nodeselector: openshift_hosted_logging_elasticsearch_ops_nodeselector
openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram
openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes
openshift_logging_master_public_url: openshift_hosted_logging_master_public_url
openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix
openshift_logging_image_version: openshift_hosted_logging_deployer_version
openshift_logging_install_logging: openshift_hosted_logging_deploy
+ openshift_logging_curator_nodeselector: openshift_hosted_logging_curator_nodeselector
+ openshift_logging_curator_ops_nodeselector: openshift_hosted_logging_curator_ops_nodeselector
- set_fact:
@@ -40,9 +46,3 @@
openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size | default('10Gi') if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
- openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
- openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
- openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
- openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
- openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index e327ee9f5..74c1a51a8 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -54,3 +54,16 @@
- include: unsupported.yml
when:
- not openshift_enable_unsupported_configurations | default(false) | bool
+
+- name: Ensure clusterid is set along with the cloudprovider
+ fail:
+ msg: >
+ Ensure that the openshift_clusterid is set and that all infrastructure has the required tags.
+
+ For dynamic provisioning when using multiple clusters in different zones, tag each node with Key=kubernetes.io/cluster/xxxx,Value=clusterid where xxxx and clusterid are unique per cluster. In versions prior to 3.6, this was Key=KubernetesCluster,Value=clusterid.
+
+ https://github.com/openshift/openshift-docs/blob/master/install_config/persistent_storage/dynamically_provisioning_pvs.adoc#available-dynamically-provisioned-plug-ins
+ when:
+ - openshift_clusterid is not defined
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'aws'
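A hedged inventory sketch that satisfies this check (the clusterid value is illustrative and must match the kubernetes.io/cluster/<id> tag on the AWS infrastructure):
```
openshift_cloudprovider_kind: aws
openshift_clusterid: mycluster
```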
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
index 39bf1780a..b70ab90a1 100644
--- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml
+++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
@@ -11,6 +11,14 @@
will not function. This also means that NetworkManager must be installed,
enabled, and responsible for management of the primary interface.
+- name: Ensure that openshift_node_dnsmasq_install_network_manager_hook is true
+ when:
+ - not openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
+ fail:
+ msg: |-
+ The NetworkManager hook is considered a critical part of the DNS
+ infrastructure.
+
- set_fact:
__using_dynamic: True
when:
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
index 71e21a269..f449fba2b 100644
--- a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -1,25 +1,26 @@
apiVersion: v1
kind: Template
metadata:
- name: service-catalog
+ name: service-catalog-role-bindings
objects:
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: servicecatalog-serviceclass-viewer
rules:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - serviceclasses
+ - clusterserviceclasses
+ - clusterserviceplans
verbs:
- list
- watch
- get
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: servicecatalog-serviceclass-viewer-binding
roleRef:
@@ -37,8 +38,8 @@ objects:
metadata:
name: service-catalog-apiserver
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: sar-creator
rules:
@@ -49,17 +50,19 @@ objects:
verbs:
- create
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-sar-creator-binding
roleRef:
name: sar-creator
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: namespace-viewer
rules:
@@ -72,26 +75,30 @@ objects:
- watch
- get
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-namespace-viewer-binding
roleRef:
name: namespace-viewer
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-controller-namespace-viewer-binding
roleRef:
name: namespace-viewer
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-controller
+ namespace: kube-service-catalog
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: service-catalog-controller
rules:
@@ -102,6 +109,7 @@ objects:
verbs:
- create
- update
+ - patch
- delete
- get
- list
@@ -109,19 +117,22 @@ objects:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - brokers/status
- - instances/status
- - bindings/status
+ - clusterservicebrokers/status
+ - serviceinstances/status
+ - servicebindings/status
+ - servicebindings/finalizers
+ - serviceinstances/reference
verbs:
- update
- apiGroups:
- servicecatalog.k8s.io
resources:
- - brokers
- - instances
- - bindings
+ - clusterservicebrokers
+ - serviceinstances
+ - servicebindings
verbs:
- list
+ - get
- watch
- apiGroups:
- ""
@@ -133,7 +144,8 @@ objects:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - serviceclasses
+ - clusterserviceclasses
+ - clusterserviceplans
verbs:
- create
- delete
@@ -154,17 +166,19 @@ objects:
- list
- watch
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-controller-binding
roleRef:
name: service-catalog-controller
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
-
-- kind: Role
- apiVersion: v1
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-controller
+ namespace: kube-service-catalog
+
+- apiVersion: authorization.openshift.io/v1
+ kind: Role
metadata:
name: endpoint-accessor
rules:
@@ -179,21 +193,25 @@ objects:
- create
- update
-- kind: RoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: RoleBinding
metadata:
- name: endpoint-accessor-binding
+ name: endpointer-accessor-binding
roleRef:
name: endpoint-accessor
namespace: kube-service-catalog
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
+ subjects:
+ - kind: ServiceAccount
+ namespace: kube-service-catalog
+ name: service-catalog-controller
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: system:auth-delegator-binding
roleRef:
name: system:auth-delegator
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
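The pattern repeated throughout this template replaces the legacy `userNames` list with explicit `subjects`. A minimal standalone sketch of the new binding shape (role and service account names are placeholders):
```
apiVersion: authorization.openshift.io/v1
kind: ClusterRoleBinding
metadata:
  name: example-binding
roleRef:
  name: example-role
subjects:
  - kind: ServiceAccount
    name: example-sa
    namespace: kube-service-catalog
```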
diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
index f6ee0955d..f563ae42e 100644
--- a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
@@ -1,11 +1,11 @@
apiVersion: v1
kind: Template
metadata:
- name: kube-system-service-catalog
+ name: kube-system-service-catalog-role-bindings
objects:
-- kind: Role
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: Role
metadata:
name: extension-apiserver-authentication-reader
namespace: ${KUBE_SYSTEM_NAMESPACE}
@@ -19,16 +19,18 @@ objects:
verbs:
- get
-- kind: RoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: RoleBinding
metadata:
name: extension-apiserver-authentication-reader-binding
namespace: ${KUBE_SYSTEM_NAMESPACE}
roleRef:
name: extension-apiserver-authentication-reader
- namespace: kube-system
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
parameters:
- description: Do not change this value.
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index cc897b032..9d55185c8 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -16,6 +16,16 @@
--key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt
--serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer
+- name: Delete old apiserver.crt
+ file:
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+ state: absent
+
+- name: Delete old apiserver.key
+ file:
+ path: "{{ generated_certs_dir }}/apiserver.key"
+ state: absent
+
- name: Generating server keys
oc_adm_ca_server_cert:
cert: "{{ generated_certs_dir }}/apiserver.crt"
@@ -36,19 +46,28 @@
- name: tls.key
path: "{{ generated_certs_dir }}/apiserver.key"
+- name: Create service-catalog-ssl secret
+ oc_secret:
+ state: present
+ name: service-catalog-ssl
+ namespace: kube-service-catalog
+ files:
+ - name: tls.crt
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+
- slurp:
src: "{{ generated_certs_dir }}/ca.crt"
register: apiserver_ca
- shell: >
- oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
register: get_apiservices
changed_when: no
- name: Create api service
oc_obj:
state: present
- name: v1alpha1.servicecatalog.k8s.io
+ name: v1beta1.servicecatalog.k8s.io
kind: apiservices.apiregistration.k8s.io
namespace: "kube-service-catalog"
content:
@@ -57,10 +76,10 @@
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
- name: v1alpha1.servicecatalog.k8s.io
+ name: v1beta1.servicecatalog.k8s.io
spec:
group: servicecatalog.k8s.io
- version: v1alpha1
+ version: v1beta1
service:
namespace: "kube-service-catalog"
name: apiserver
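
Deleting any stale apiserver.crt/apiserver.key before the signing task matters
because oc_adm_ca_server_cert leaves existing files alone, so certificates
minted for the v1alpha1-era deployment would otherwise survive the upgrade.
The aggregated API registration then moves from v1alpha1 to v1beta1 to match
the current service-catalog API group version. A spot check after a run,
illustrative only:

    - name: Confirm the v1beta1 API service is registered (illustrative)
      command: >
        oc get apiservices.apiregistration.k8s.io v1beta1.servicecatalog.k8s.io -o yaml
      changed_when: no
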
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index e202ae173..aa3ec5724 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -47,16 +47,15 @@
dest: "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
- oc_obj:
- name: service-catalog
+ name: service-catalog-role-bindings
kind: template
namespace: "kube-service-catalog"
files:
- "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
- delete_after: yes
- oc_process:
create: True
- template_name: service-catalog
+ template_name: service-catalog-role-bindings
namespace: "kube-service-catalog"
- copy:
@@ -64,16 +63,15 @@
dest: "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
- oc_obj:
- name: kube-system-service-catalog
+ name: kube-system-service-catalog-role-bindings
kind: template
namespace: kube-system
files:
- "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
- delete_after: yes
- oc_process:
create: True
- template_name: kube-system-service-catalog
+ template_name: kube-system-service-catalog-role-bindings
namespace: kube-system
- oc_obj:
@@ -90,14 +88,14 @@
vars:
original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update edit role for service catalog and pod preset access
command: >
oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: admin
@@ -113,14 +111,14 @@
vars:
original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update admin role for service catalog and pod preset access
command: >
oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_adm_policy_user:
namespace: kube-service-catalog
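
The oo_contains_rule guards change in lockstep with the resource renames: when
checked against a v1beta1 catalog, the old instances/bindings names would
never match, so the edit and admin roles would be re-patched on every run
rather than only when the rule is actually missing. For reference, the rule
the guard looks for, as sc_role_patching.j2 below renders it (the verb list
shown is the one the guard tests):

    - apiGroups:
      - servicecatalog.k8s.io
      attributeRestrictions: null
      resources:
      - serviceinstances
      - servicebindings
      verbs:
      - create
      - update
      - delete
      - get
      - list
      - watch
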
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index 2fb1ec440..ca9844e79 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,11 +1,7 @@
---
- name: Remove Service Catalog APIServer
command: >
- oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
-
-- name: Remove Policy Binding
- command: >
- oc delete policybindings/kube-system:default -n kube-system --ignore-not-found
+ oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
# TODO: this module doesn't currently remove this
#- name: Remove service catalog api service
@@ -13,7 +9,7 @@
# state: absent
# namespace: "kube-service-catalog"
# kind: apiservices.apiregistration.k8s.io
-# name: v1alpha1.servicecatalog.k8s.io
+# name: v1beta1.servicecatalog.k8s.io
- name: Remove Service Catalog API Server route
oc_obj:
@@ -50,6 +46,26 @@
kind: deployment
name: controller-manager
+- name: Remove Service Catalog kube-system Role Bindings
+ shell: >
+ oc process kube-system-service-catalog-role-bindings -n kube-system | oc delete --ignore-not-found -f -
+
+- oc_obj:
+ kind: template
+ name: "kube-system-service-catalog-role-bindings"
+ namespace: kube-system
+ state: absent
+
+- name: Remove Service Catalog kube-service-catalog Role Bindings
+ shell: >
+ oc process service-catalog-role-bindings -n kube-service-catalog | oc delete --ignore-not-found -f -
+
+- oc_obj:
+ kind: template
+ name: "service-catalog-role-bindings"
+ namespace: kube-service-catalog
+ state: absent
+
- name: Remove Service Catalog namespace
oc_project:
state: absent
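
The removal flow leans on a property of stored templates: re-running
oc process yields the same object list that was originally instantiated, so
piping the output to oc delete --ignore-not-found -f - removes exactly those
objects and stays idempotent when some are already gone; the template object
itself is then deleted separately through oc_obj. The generic shape of the
pattern, with placeholder names:

    - name: Delete everything a stored template created (pattern)
      shell: >
        oc process <template-name> -n <namespace>
        | oc delete --ignore-not-found -f -
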
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index c09834fd4..5d5352c1c 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -41,7 +41,9 @@ spec:
- --cors-allowed-origins
- {{ cors_allowed_origin }}
- --admission-control
- - "KubernetesNamespaceLifecycle"
+ - KubernetesNamespaceLifecycle,DefaultServicePlan,ServiceBindingsLifecycle,ServicePlanChangeValidator,BrokerAuthSarCheck
+ - --feature-gates
+ - OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/apiserver"]
imagePullPolicy: Always
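
--admission-control takes its plugin list as one comma-separated argv entry,
so the rendered pod spec carries a single string for all five plugins, and the
OriginatingIdentity gate enabled here is mirrored on the controller manager
below so both components agree on the feature set. The per-plugin comments in
this sketch are orientation only, inferred from the plugin names:

    - --admission-control
    # one argv entry carries all five plugins:
    #   KubernetesNamespaceLifecycle  - reject creates in terminating namespaces
    #   DefaultServicePlan            - pick the plan when a class offers only one
    #   ServiceBindingsLifecycle      - validate binding create/delete
    #   ServicePlanChangeValidator    - validate plan changes on instances
    #   BrokerAuthSarCheck            - access reviews on broker auth secrets
    - KubernetesNamespaceLifecycle,DefaultServicePlan,ServiceBindingsLifecycle,ServicePlanChangeValidator,BrokerAuthSarCheck
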
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 1bbc0fa2c..2272cbb44 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -31,7 +31,12 @@ spec:
args:
- -v
- "5"
- - "--leader-election-namespace=$(K8S_NAMESPACE)"
+ - --leader-election-namespace
+ - kube-service-catalog
+ - --broker-relist-interval
+ - "5m"
+ - --feature-gates
+ - OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/controller-manager"]
imagePullPolicy: Always
@@ -41,7 +46,19 @@ spec:
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - mountPath: /var/run/kubernetes-service-catalog
+ name: service-catalog-ssl
+ readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
securityContext: {}
terminationGracePeriodSeconds: 30
+ volumes:
+ - name: service-catalog-ssl
+ secret:
+ defaultMode: 420
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ secretName: apiserver-ssl
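
The new volume hands the controller manager read access to the API server's
serving certificate: the items stanza remaps the secret key tls.crt to the
filename apiserver.crt, so inside the container it shows up at
/var/run/kubernetes-service-catalog/apiserver.crt, and defaultMode 420 is
octal 0644 written in decimal, since Kubernetes secret volumes take decimal
file modes. An illustrative check, with a placeholder pod name:

    - name: Verify the cert lands where the controller expects (illustrative)
      command: >
        oc exec -n kube-service-catalog <controller-manager-pod> --
        ls /var/run/kubernetes-service-catalog/apiserver.crt
      changed_when: no
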
diff --git a/roles/openshift_service_catalog/templates/sc_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_role_patching.j2
index 69b062b3f..4629d5bb3 100644
--- a/roles/openshift_service_catalog/templates/sc_role_patching.j2
+++ b/roles/openshift_service_catalog/templates/sc_role_patching.j2
@@ -3,8 +3,8 @@
- "servicecatalog.k8s.io"
attributeRestrictions: null
resources:
- - instances
- - bindings
+ - serviceinstances
+ - servicebindings
verbs:
- create
- update
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 54008bbf1..6a532a206 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -6,7 +6,7 @@
- "{{ openshift_deployment_type | default(deployment_type) }}.yml"
- "default_images.yml"
-- name: set ansible_service_broker facts
+- name: set template_service_broker facts
set_fact:
template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}"
template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}"
@@ -76,7 +76,7 @@
when: openshift_master_config_dir is undefined
- slurp:
- src: "{{ openshift_master_config_dir }}/ca.crt"
+ src: "{{ openshift_master_config_dir }}/service-signer.crt"
register: __ca_bundle
# Register with broker
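
Switching the slurped file from ca.crt to service-signer.crt makes broker
registration trust the service-serving-cert signer rather than the master CA;
the broker is reached through its cluster service, whose certificate that
signer issues, so validating against the master CA would fail. Note that slurp
returns content base64-encoded, which is already what a broker registration's
caBundle field expects; a minimal sketch of consuming the fact (field name
assumed from the service-catalog API):

    # __ca_bundle.content is base64 already; no extra b64encode filter needed
    spec:
      caBundle: "{{ __ca_bundle.content }}"
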
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index f3afe65ed..28836f97f 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -13,11 +13,11 @@
- name: Delete TSB broker
shell: >
- oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete -f -
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete --ignore-not-found -f -
- name: Delete TSB objects
shell: >
- oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | kubectl delete -f -
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | oc delete --ignore-not-found -f -
- name: empty out tech preview extension file for service console UI
copy: