Diffstat (limited to 'roles'): 55 files changed, 695 insertions, 299 deletions
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml index 8c4d19537..b5d2f7c6e 100644 --- a/roles/contiv/defaults/main.yml +++ b/roles/contiv/defaults/main.yml @@ -1,6 +1,6 @@ --- # The version of Contiv binaries to use -contiv_version: 1.0.1 +contiv_version: 1.1.1 # The version of cni binaries cni_version: v0.4.0 diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml index da6409f1e..a2c2f98a7 100644 --- a/roles/contiv/meta/main.yml +++ b/roles/contiv/meta/main.yml @@ -27,4 +27,4 @@ dependencies: etcd_peer_url_scheme: http when: contiv_role == "netmaster" - role: contiv_auth_proxy - when: (contiv_role == "netmaster") and (contiv_enable_auth_proxy == true) + when: contiv_role == "netmaster" diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml index acaf7386e..cc52d3a43 100644 --- a/roles/contiv/tasks/netmaster.yml +++ b/roles/contiv/tasks/netmaster.yml @@ -41,6 +41,18 @@ mode: 0644 notify: restart netmaster +- name: Netmaster | Ensure contiv_config_dir exists + file: + path: "{{ contiv_config_dir }}" + recurse: yes + state: directory + +- name: Netmaster | Setup contiv.json config for the cni plugin + template: + src: contiv.cfg.master.j2 + dest: "{{ contiv_config_dir }}/contiv.json" + notify: restart netmaster + - name: Netmaster | Copy systemd units for netmaster template: src: netmaster.service diff --git a/roles/contiv/templates/contiv.cfg.j2 b/roles/contiv/templates/contiv.cfg.j2 index 2c9a666a9..f0e99c556 100644 --- a/roles/contiv/templates/contiv.cfg.j2 +++ b/roles/contiv/templates/contiv.cfg.j2 @@ -2,5 +2,6 @@ "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}", "K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt", "K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key", - "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt" + "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt", + "SVC_SUBNET": "172.30.0.0/16" } diff --git a/roles/contiv/templates/contiv.cfg.master.j2 b/roles/contiv/templates/contiv.cfg.master.j2 new file mode 100644 index 000000000..fac8e3c4c --- /dev/null +++ b/roles/contiv/templates/contiv.cfg.master.j2 @@ -0,0 +1,7 @@ +{ + "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}", + "K8S_CA": "{{ openshift.common.config_base }}/master/ca.crt", + "K8S_KEY": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.key", + "K8S_CERT": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.crt", + "SVC_SUBNET": "172.30.0.0/16" +} diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service index 21c0380be..a602c955e 100644 --- a/roles/contiv/templates/netmaster.service +++ b/roles/contiv/templates/netmaster.service @@ -6,3 +6,5 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service EnvironmentFile=/etc/default/netmaster ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS KillMode=control-group +Restart=on-failure +RestartSec=10 diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml index 4e637a947..e1d904c6a 100644 --- a/roles/contiv_auth_proxy/defaults/main.yml +++ b/roles/contiv_auth_proxy/defaults/main.yml @@ -1,11 +1,12 @@ --- 
-auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2" +auth_proxy_image: "contiv/auth_proxy:1.1.1" auth_proxy_port: 10000 contiv_certs: "/var/contiv/certs" -cluster_store: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379" +cluster_store: "etcd://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379" auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem" auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem" auth_proxy_datastore: "{{ cluster_store }}" auth_proxy_binaries: "/var/contiv_cache" auth_proxy_local_install: False auth_proxy_rule_comment: "Contiv auth proxy service" +service_vip: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}" diff --git a/roles/contiv_auth_proxy/files/cert.pem b/roles/contiv_auth_proxy/files/cert.pem new file mode 100644 index 000000000..63df4603f --- /dev/null +++ b/roles/contiv_auth_proxy/files/cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFuTCCA6GgAwIBAgIJAOFyylO2zW2EMA0GCSqGSIb3DQEBCwUAMHMxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoM +BENQU0cxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxHTAbBgNVBAMMFGF1dGgtbG9j +YWwuY2lzY28uY29tMB4XDTE3MDcxMzE5NDYwMVoXDTI3MDcxMTE5NDYwMVowczEL +MAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMREwDwYDVQQHDAhTYW4gSm9zZTENMAsG +A1UECgwEQ1BTRzEWMBQGA1UECwwNSVQgRGVwYXJ0bWVudDEdMBsGA1UEAwwUYXV0 +aC1sb2NhbC5jaXNjby5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDKCg26dvsD1u3f1lCaLlVptyTyGyanaJ73mlHiUnAMcu0A/p3kzluTeQLZJxtl +MToM7rT/lun6fbhQC+7TQep9mufBzLhssyzRnT9rnGSeGwN66mO/rlYPZc5C1D7p +7QZh1uLznzgOA2zMkgnI+n6LB2TZWg+XLhZZIr5SVYE18lj0tnwq3R1uznVv9t06 +grUYK2K7x0Y3Pt2e6yV0e1w2FOGH+7v3mm0c8r1+7U+4EZ2SM3fdG7nyTL/187gl +yE8X4HOnAyYGbAnULJC02LR/DTQpv/RpLN/YJEpHZWApHZCKh+fbFdIhRRwEnT4L +DLy3GJVFDEsmFaC91wf24+HAeUl9/hRIbxo9x/7kXmrhMlK38x2oo3cPh0XZxHje +XmJUGG1OByAuIZaGFwS9lUuGTNvpN8P/v3HN/nORc0RE3fvoXIv4nuhaEfuo32q4 +dvO4aNjmxjz1JcUEx6DiMQe4ECaReYdvI+j9ZkUJj/e89iLsQ8gz5t3FTM+tmBi1 +hrRBAgWyRY5DKECVv2SNFiX55JQGA5vQDGw51qTTuhntfBhkHvhKL7V1FRZazx6N +wqFyynig/jplb1ZNdKZ9ZxngZr6qHIx4RcGaJ9HdVhik7NyUCiHjWeGagzun2Omq +FFXAD9Hmfctac5bGxx0FBi95kO8bd8b0GSIh2CWanETjawIDAQABo1AwTjAdBgNV +HQ4EFgQU5P1g5gFZot//iwEV98MwW2YXzEMwHwYDVR0jBBgwFoAU5P1g5gFZot// +iwEV98MwW2YXzEMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAbWgN +BkFzzG5sbG7vUb23Ggv/0TCCuMtuKBGOBR0EW5Ssw6Aml7j3AGiy/1+2sdrQMsx2 +nVpexyQW5XS/X+8JjH7H7ifvwl3bVJ8xiR/9ioIJovrQojxQO0cUB2Lljj3bPd/R +/tddAhPj0uN9N7UAejA12kXGa0Rrzb2U1rIpO9jnTbQYJiTOSzFiiGRMZWx3hfsW +SDTpPmsV2Mh+jcmuxvPITl0s+vtqsm7SYoUZHwJ80LvrPbmk/5hTZGRsI3W5jipB +PpOxvBnAWnQH3miMhty2TDaQ9JjYUwnxjFFZvNIYtp8+eH4nlbSldbgZoUeAe8It +X6SsP8gT/uQh3TPvzNIfYROA7qTwoOQ8ZW8ssai/EttHAztFxketgNEfjwUTz8EJ +yKeyAJ7qk3zD5k7p33ZNLWjmN0Awx3fCE9OQmNUyNX7PpYb4i+tHWu3h6Clw0RUf +0gb1I+iyB3PXmpiYtxdMxGSi9CQIyWHzC4bsTQZkrzzIHWFSwewhUWOQ2Wko0hrv +DnkS5k0cMPn5aNxw56H6OI+6hb+y/GGkTxNY9Gbxypx6lgZson0EY80EPZOJAORM +XggJtTjiMpzvKh18DZY/Phmdh0C2tt8KYFdG83qLEhya9WZujbLAm38vIziFHbdX +jOitXBSPyVrV3JvsCVksp+YC8Lnv3FsM494R4kA= +-----END CERTIFICATE----- diff --git a/roles/contiv_auth_proxy/files/key.pem b/roles/contiv_auth_proxy/files/key.pem new file mode 100644 index 000000000..7224e569c --- /dev/null +++ b/roles/contiv_auth_proxy/files/key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAygoNunb7A9bt39ZQmi5Vabck8hsmp2ie95pR4lJwDHLtAP6d +5M5bk3kC2ScbZTE6DO60/5bp+n24UAvu00HqfZrnwcy4bLMs0Z0/a5xknhsDeupj +v65WD2XOQtQ+6e0GYdbi8584DgNszJIJyPp+iwdk2VoPly4WWSK+UlWBNfJY9LZ8 
+Kt0dbs51b/bdOoK1GCtiu8dGNz7dnusldHtcNhThh/u795ptHPK9fu1PuBGdkjN3 +3Ru58ky/9fO4JchPF+BzpwMmBmwJ1CyQtNi0fw00Kb/0aSzf2CRKR2VgKR2Qiofn +2xXSIUUcBJ0+Cwy8txiVRQxLJhWgvdcH9uPhwHlJff4USG8aPcf+5F5q4TJSt/Md +qKN3D4dF2cR43l5iVBhtTgcgLiGWhhcEvZVLhkzb6TfD/79xzf5zkXNERN376FyL ++J7oWhH7qN9quHbzuGjY5sY89SXFBMeg4jEHuBAmkXmHbyPo/WZFCY/3vPYi7EPI +M+bdxUzPrZgYtYa0QQIFskWOQyhAlb9kjRYl+eSUBgOb0AxsOdak07oZ7XwYZB74 +Si+1dRUWWs8ejcKhcsp4oP46ZW9WTXSmfWcZ4Ga+qhyMeEXBmifR3VYYpOzclAoh +41nhmoM7p9jpqhRVwA/R5n3LWnOWxscdBQYveZDvG3fG9BkiIdglmpxE42sCAwEA +AQKCAgANVU6EoLd+EGAQZo9ZLXebi2eXxqztXV0oT/nZasFUQP1dFHCNGgU3HURP +2mHXcsE2+0XcnDQCwOs59R+kt3PnKCLlSkJdghGSH8OAsYh+WqAHK5K7oqCxUXGk +PWeNfoPuTwUZOMe1PQqgEX8t0UIqoKlKIsRmoLb+2Okge94UFlNCiwx0s7TujBd5 +9Ruycc/LsYlJhSQgHzj29OO65S03sHcVx0onU/yhbW+OAdFB/3+bl2PwppTF5cTB +UX00mRyHIdvgCLgoslaPtwUxuh9nRxLLMozJqBl5pSN1xL3s2LOiQMfPUIhWg74O +m+XtSsDlgGzRardG4ySBgsBWzcEnGWi5/xyc/6dtERzR382+CLUfOEoucGJHk6kj +RdbVx5FCawpAzjs9Wo49Vr+WQceSiBfb2+ndNUTiD0wu7xLEVPcYC6CMk71qZv5H +0qGlLhtkHF0nSQytbwqwfMz2SGDfkwIHgQ0gTKMpEMWK79E24ewE1BnMiaKC1bgk +evB6WM1YZFMKS5L7fshJcbeMe9dhSF3s+Y0MYVv5MCL1VMZyIzAcj8mkPYZyBRUk +MC87GnaebeTvHNtimvqCuWDGVI1SOoc1xtopkxinTqtIYGuQacrSmfyf9D3Rg4+l +kB0ibtJV+HLP94q266aef/PdpXszs7zo0h6skpLItW/jAuSNuQKCAQEA/VdXpMi8 +nfOtXwOZlGA2+jShYyHyCl2TKgbpfDGl1yKNkbBrIu2/PEl1DpmzSeG1tdNCzN68 +4vEjpF/jBsdSJj4BDiRY6HEcURXpw4yTZ7oCnUCbzadLIo3wX/gFDEVZz+0nQQ29 +5x0XGuQnJXC2fe/CyrkfltKhFSYoTSjtMbma4Pm3Q3HP3wGOvoUKtKNDO5rF26Qh +YtqJgJSKBAms0wKiy9VVTa6DaXrtSnXTR+Ltud3xnWBrX1Z+idwxYt/Be5W2woHf +M5zPIqMUgry5ujtRxhLmleFXDAYbaIQR9AZXlSS3w+9Gcl5EDRkFXqlaoCfppwTR +wakj2lNjbAidPwKCAQEAzCjgko4/Yss/0dCs8ySKd2IaRF93OwC/E2SHVqe5bATh +rVmDn/KIH4J2fI4FiaIHELT1CU5vmganYbK2k7CoJztjJltM1B7rkpHiVSL+qMqn +yBZFg3LFq9eiBPZHyQEc+HMJUhFRexjdeqLH78HCoPz1QnKo2xRoGHhSQ/Rh6lXo +20tldL9HrSxPRmwxnyLgWGcWopv/92JNxu6FgnZcnsVjkpO2mriLD7+Ty5qfvkwc +RFDBYnq2JjBcvqngrzDIGDzC7hTA5BRuuQdNMZggJwO6nKdZDUrq5NIo9B07FLj1 +IRMVm7D1vJYzYI6HW7Wj4vNRXMY8jG1fwvNG0+xy1QKCAQEA7m14R9bAZWuDnGt3 +7APNWheUWAcHk6fTq/cLYV4cdWfIkvfVLO9STrvXliEjcoIhkPk94jAy1ucZo0a3 +FJccgm9ScOvWXRSvEMUt12ODC1ktwq+esqMi/GdXdgqnPZA7YYwRqJD1TAC90Qou +qXb12Xp/+mjWCQ08mvnpbgz5hxXmZJvAVZJUj84YeMgfdjg9O2iDlB5ZaX7BcCjb +58bvRzww2ONzQAPhG7Gch7pyWTKCh64RCgtHold2CesY87QglV4mvdKarSmEbFXN +JOnXZiUT5fW93AtS8DcDLo81klMxtGT1KksUIukC5MzKl/eNGjPWG+FWRAwaeQyI +ApHs4wKCAQAI10RSVGKeTprm5Rh4Nv7gCJmGmHO7VF7x4gqSUBURfmyfax7uEDyg +0K982VGYEjIoIQ3zZzgh/WPGMU0CvEWr3UB/6rg6/1PINxUMBsXsXUpCueQsuw2g +UWgsutWE+M1eXOzsZt+Waw88PkxWL5fUDOA6DmkNg6a2WI+Hbc/HrAy3Yl50Xcwm +zaJpNEo5z/LTITOzuvmsps8jbDTP33xHS9jyAf+IV7F97xfhW0LLpNQciTq2nwXA +RZvejdCzBXPEyOzQDooD1natAInxOds6lUjBe+W5U6M0YX1whMuILDJBSmhHI7Sg +hAiZh9KIwCbmrw6468S3eA0LjillB/o5AoIBAQCg93syT50nYF2UWWP/rEa7qf6h ++YpBPpJskIl3NDMJtie9OcdsoFpjblpFbsMqsSag9KhGl7wn4f8qXO0HERSb8oYd +1Zu6BgUCuRXuAKNI4f508IooNpXx9y7xxl4giFBnDPa6W3KWqZ2LMDt92htMd/Zm +qvoyYZhFhMSyKFzPDAFdsZijJgahqJRKhHeW9BsPqho5i7Ys+PhE8e/vUZs2zUeS +QEHWhVisDTNKOoJIdz7JXFgEXCPTLAxXIIhYSkIfQxHxsWjt0vs79tzUkV8NlpKt +d7s0iyHnD6kDvoxYOSI9YmSEnnFBFdgeiD+/VD+7enOdqb5MHsjuw+by09ft +-----END RSA PRIVATE KEY----- diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2 index e82e5b4ab..0ab8c831b 100644 --- a/roles/contiv_auth_proxy/templates/auth_proxy.j2 +++ b/roles/contiv_auth_proxy/templates/auth_proxy.j2 @@ -14,7 +14,7 @@ start) -p 10000:{{ auth_proxy_port }} \ --net=host --name=auth-proxy \ -e NO_NETMASTER_STARTUP_CHECK=1 \ - -v /var/contiv:/var/contiv \ + -v /var/contiv:/var/contiv:z \ {{ auth_proxy_image }} \ --tls-key-file={{ 
auth_proxy_key }} \ --tls-certificate={{ auth_proxy_cert }} \ diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 221ef5094..7154fd839 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1959,28 +1959,28 @@ class PolicyGroup(OpenShiftCLI): self.verbose = verbose self._rolebinding = None self._scc = None - self._cluster_policy_bindings = None - self._policy_bindings = None + self._cluster_role_bindings = None + self._role_bindings = None @property - def policybindings(self): - if self._policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def rolebindings(self): + if self._role_bindings is None: + results = self._get('rolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve policybindings') - self._policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve rolebindings') + self._role_bindings = results['results'][0]['items'] - return self._policy_bindings + return self._role_bindings @property - def clusterpolicybindings(self): - if self._cluster_policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def clusterrolebindings(self): + if self._cluster_role_bindings is None: + results = self._get('clusterrolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') - self._cluster_policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve clusterrolebindings') + self._cluster_role_bindings = results['results'][0]['items'] - return self._cluster_policy_bindings + return self._cluster_role_bindings @property def role_binding(self): @@ -2023,18 +2023,17 @@ class PolicyGroup(OpenShiftCLI): ''' return whether role_binding exists ''' bindings = None if self.config.config_options['resource_kind']['value'] == 'cluster-role': - bindings = self.clusterpolicybindings + bindings = self.clusterrolebindings else: - bindings = self.policybindings + bindings = self.rolebindings if bindings is None: return False - for binding in bindings['roleBindings']: - _rb = binding['roleBinding'] - if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ - _rb['groupNames'] is not None and \ - self.config.config_options['group']['value'] in _rb['groupNames']: + for binding in bindings: + if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \ + binding['groupNames'] is not None and \ + self.config.config_options['group']['value'] in binding['groupNames']: self.role_binding = binding return True diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 071562875..3fcf49799 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1950,36 +1950,36 @@ class PolicyUser(OpenShiftCLI): ''' Class to handle attaching policies to users ''' def __init__(self, - policy_config, + config, verbose=False): ''' Constructor for PolicyUser ''' - super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose) - self.config = policy_config + super(PolicyUser, self).__init__(config.namespace, config.kubeconfig, verbose) + self.config = config self.verbose = verbose self._rolebinding = None self._scc = None - self._cluster_policy_bindings = None - self._policy_bindings = 
None + self._cluster_role_bindings = None + self._role_bindings = None @property - def policybindings(self): - if self._policy_bindings is None: - results = self._get('policybindings', None) + def rolebindings(self): + if self._role_bindings is None: + results = self._get('rolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve policybindings') - self._policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve rolebindings') + self._role_bindings = results['results'][0]['items'] - return self._policy_bindings + return self._role_bindings @property - def clusterpolicybindings(self): - if self._cluster_policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def clusterrolebindings(self): + if self._cluster_role_bindings is None: + results = self._get('clusterrolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') - self._cluster_policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve clusterrolebindings') + self._cluster_role_bindings = results['results'][0]['items'] - return self._cluster_policy_bindings + return self._cluster_role_bindings @property def role_binding(self): @@ -2017,18 +2017,17 @@ class PolicyUser(OpenShiftCLI): ''' return whether role_binding exists ''' bindings = None if self.config.config_options['resource_kind']['value'] == 'cluster-role': - bindings = self.clusterpolicybindings + bindings = self.clusterrolebindings else: - bindings = self.policybindings + bindings = self.rolebindings if bindings is None: return False - for binding in bindings['roleBindings']: - _rb = binding['roleBinding'] - if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ - _rb['userNames'] is not None and \ - self.config.config_options['user']['value'] in _rb['userNames']: + for binding in bindings: + if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \ + binding['userNames'] is not None and \ + self.config.config_options['user']['value'] in binding['userNames']: self.role_binding = binding return True diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index 289f08b83..d101eac1c 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -1671,7 +1671,7 @@ class OCClusterRole(OpenShiftCLI): self.clusterrole = ClusterRole(content=result['results'][0]) result['results'] = self.clusterrole.yaml_dict - elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']: + elif '"{}" not found'.format(self.name) in result['stderr']: result['returncode'] = 0 self.clusterrole = None diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index a88639bfc..a21540962 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -110,6 +110,18 @@ options: - ReadOnlyMany - ReadWriteMany aliases: [] + storage_class_name: + description: + - The storage class name for the PVC + required: false + default: None + aliases: [] + selector: + description: + - A hash of key/values for the matchLabels + required: false + default: None + aliases: [] author: - "Kenny Woodson <kwoodson@redhat.com>" extends_documentation_fragment: [] @@ -1420,7 +1432,9 @@ class PersistentVolumeClaimConfig(object): namespace, kubeconfig, access_modes=None, - vol_capacity='1G'): + 
vol_capacity='1G', + selector=None, + storage_class_name=None): ''' constructor for handling pvc options ''' self.kubeconfig = kubeconfig self.name = sname @@ -1428,6 +1442,8 @@ class PersistentVolumeClaimConfig(object): self.access_modes = access_modes self.vol_capacity = vol_capacity self.data = {} + self.selector = selector + self.storage_class_name = storage_class_name self.create_dict() @@ -1445,12 +1461,16 @@ class PersistentVolumeClaimConfig(object): self.data['spec']['accessModes'] = ['ReadWriteOnce'] if self.access_modes: self.data['spec']['accessModes'] = self.access_modes + if self.selector: + self.data['spec']['selector'] = {'matchLabels': self.selector} # storage capacity self.data['spec']['resources'] = {} self.data['spec']['resources']['requests'] = {} self.data['spec']['resources']['requests']['storage'] = self.vol_capacity + if self.storage_class_name: + self.data['spec']['storageClassName'] = self.storage_class_name # pylint: disable=too-many-instance-attributes,too-many-public-methods class PersistentVolumeClaim(Yedit): @@ -1460,13 +1480,29 @@ class PersistentVolumeClaim(Yedit): volume_name_path = "spec.volumeName" bound_path = "status.phase" kind = 'PersistentVolumeClaim' + selector_path = "spec.selector.matchLabels" + storage_class_name_path = "spec.storageClassName" def __init__(self, content): - '''RoleBinding constructor''' + '''PersistentVolumeClaim constructor''' super(PersistentVolumeClaim, self).__init__(content=content) self._access_modes = None self._volume_capacity = None self._volume_name = None + self._selector = None + self._storage_class_name = None + + @property + def storage_class_name(self): + ''' storage_class_name property ''' + if self._storage_class_name is None: + self._storage_class_name = self.get_storage_class_name() + return self._storage_class_name + + @storage_class_name.setter + def storage_class_name(self, data): + ''' storage_class_name property setter''' + self._storage_class_name = data @property def volume_name(self): @@ -1481,6 +1517,24 @@ class PersistentVolumeClaim(Yedit): self._volume_name = data @property + def selector(self): + ''' selector property ''' + if self._selector is None: + self._selector = self.get_selector() + if not isinstance(self._selector, dict): + self._selector = dict(self._selector) + + return self._selector + + @selector.setter + def selector(self, data): + ''' selector property setter''' + if not isinstance(data, dict): + data = dict(data) + + self._selector = data + + @property def access_modes(self): ''' access_modes property ''' if self._access_modes is None: @@ -1510,6 +1564,14 @@ class PersistentVolumeClaim(Yedit): ''' volume_capacity property setter''' self._volume_capacity = data + def get_storage_class_name(self): + '''get storage_class_name''' + return self.get(PersistentVolumeClaim.storage_class_name_path) or [] + + def get_selector(self): + '''get selector''' + return self.get(PersistentVolumeClaim.selector_path) or [] + def get_access_modes(self): '''get access_modes''' return self.get(PersistentVolumeClaim.access_modes_path) or [] @@ -1663,6 +1725,8 @@ class OCPVC(OpenShiftCLI): params['kubeconfig'], params['access_modes'], params['volume_capacity'], + params['selector'], + params['storage_class_name'], ) oc_pvc = OCPVC(pconfig, verbose=params['debug']) @@ -1763,9 +1827,9 @@ def main(): name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), volume_capacity=dict(default='1G', type='str'), - access_modes=dict(default='ReadWriteOnce', - 
choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'], - type='str'), + storage_class_name=dict(default=None, required=False, type='str'), + selector=dict(default=None, required=False, type='dict'), + access_modes=dict(default=['ReadWriteOnce'], type='list'), ), supports_check_mode=True, ) diff --git a/roles/lib_openshift/src/ansible/oc_pvc.py b/roles/lib_openshift/src/ansible/oc_pvc.py index a5181e281..c98d811d6 100644 --- a/roles/lib_openshift/src/ansible/oc_pvc.py +++ b/roles/lib_openshift/src/ansible/oc_pvc.py @@ -16,9 +16,9 @@ def main(): name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), volume_capacity=dict(default='1G', type='str'), - access_modes=dict(default='ReadWriteOnce', - choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'], - type='str'), + storage_class_name=dict(default=None, required=False, type='str'), + selector=dict(default=None, required=False, type='dict'), + access_modes=dict(default=['ReadWriteOnce'], type='list'), ), supports_check_mode=True, ) diff --git a/roles/lib_openshift/src/class/oc_adm_policy_group.py b/roles/lib_openshift/src/class/oc_adm_policy_group.py index 1e51913e0..6ad57bdce 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_group.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_group.py @@ -41,28 +41,28 @@ class PolicyGroup(OpenShiftCLI): self.verbose = verbose self._rolebinding = None self._scc = None - self._cluster_policy_bindings = None - self._policy_bindings = None + self._cluster_role_bindings = None + self._role_bindings = None @property - def policybindings(self): - if self._policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def rolebindings(self): + if self._role_bindings is None: + results = self._get('rolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve policybindings') - self._policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve rolebindings') + self._role_bindings = results['results'][0]['items'] - return self._policy_bindings + return self._role_bindings @property - def clusterpolicybindings(self): - if self._cluster_policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def clusterrolebindings(self): + if self._cluster_role_bindings is None: + results = self._get('clusterrolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') - self._cluster_policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve clusterrolebindings') + self._cluster_role_bindings = results['results'][0]['items'] - return self._cluster_policy_bindings + return self._cluster_role_bindings @property def role_binding(self): @@ -105,18 +105,17 @@ class PolicyGroup(OpenShiftCLI): ''' return whether role_binding exists ''' bindings = None if self.config.config_options['resource_kind']['value'] == 'cluster-role': - bindings = self.clusterpolicybindings + bindings = self.clusterrolebindings else: - bindings = self.policybindings + bindings = self.rolebindings if bindings is None: return False - for binding in bindings['roleBindings']: - _rb = binding['roleBinding'] - if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ - _rb['groupNames'] is not None and \ - self.config.config_options['group']['value'] in _rb['groupNames']: + for binding in bindings: + if binding['roleRef']['name'] == 
self.config.config_options['name']['value'] and \ + binding['groupNames'] is not None and \ + self.config.config_options['group']['value'] in binding['groupNames']: self.role_binding = binding return True diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py index 37a685ebb..6fc8145c8 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_user.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py @@ -32,36 +32,36 @@ class PolicyUser(OpenShiftCLI): ''' Class to handle attaching policies to users ''' def __init__(self, - policy_config, + config, verbose=False): ''' Constructor for PolicyUser ''' - super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose) - self.config = policy_config + super(PolicyUser, self).__init__(config.namespace, config.kubeconfig, verbose) + self.config = config self.verbose = verbose self._rolebinding = None self._scc = None - self._cluster_policy_bindings = None - self._policy_bindings = None + self._cluster_role_bindings = None + self._role_bindings = None @property - def policybindings(self): - if self._policy_bindings is None: - results = self._get('policybindings', None) + def rolebindings(self): + if self._role_bindings is None: + results = self._get('rolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve policybindings') - self._policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve rolebindings') + self._role_bindings = results['results'][0]['items'] - return self._policy_bindings + return self._role_bindings @property - def clusterpolicybindings(self): - if self._cluster_policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def clusterrolebindings(self): + if self._cluster_role_bindings is None: + results = self._get('clusterrolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') - self._cluster_policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve clusterrolebindings') + self._cluster_role_bindings = results['results'][0]['items'] - return self._cluster_policy_bindings + return self._cluster_role_bindings @property def role_binding(self): @@ -99,18 +99,17 @@ class PolicyUser(OpenShiftCLI): ''' return whether role_binding exists ''' bindings = None if self.config.config_options['resource_kind']['value'] == 'cluster-role': - bindings = self.clusterpolicybindings + bindings = self.clusterrolebindings else: - bindings = self.policybindings + bindings = self.rolebindings if bindings is None: return False - for binding in bindings['roleBindings']: - _rb = binding['roleBinding'] - if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ - _rb['userNames'] is not None and \ - self.config.config_options['user']['value'] in _rb['userNames']: + for binding in bindings: + if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \ + binding['userNames'] is not None and \ + self.config.config_options['user']['value'] in binding['userNames']: self.role_binding = binding return True diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py index ae6795446..328e5cb67 100644 --- a/roles/lib_openshift/src/class/oc_clusterrole.py +++ b/roles/lib_openshift/src/class/oc_clusterrole.py @@ -56,7 +56,7 @@ class OCClusterRole(OpenShiftCLI): self.clusterrole = 
ClusterRole(content=result['results'][0]) result['results'] = self.clusterrole.yaml_dict - elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']: + elif '"{}" not found'.format(self.name) in result['stderr']: result['returncode'] = 0 self.clusterrole = None diff --git a/roles/lib_openshift/src/class/oc_pvc.py b/roles/lib_openshift/src/class/oc_pvc.py index c73abc47c..6b566c301 100644 --- a/roles/lib_openshift/src/class/oc_pvc.py +++ b/roles/lib_openshift/src/class/oc_pvc.py @@ -85,6 +85,8 @@ class OCPVC(OpenShiftCLI): params['kubeconfig'], params['access_modes'], params['volume_capacity'], + params['selector'], + params['storage_class_name'], ) oc_pvc = OCPVC(pconfig, verbose=params['debug']) diff --git a/roles/lib_openshift/src/doc/pvc b/roles/lib_openshift/src/doc/pvc index 9240f2a0f..268ad0b94 100644 --- a/roles/lib_openshift/src/doc/pvc +++ b/roles/lib_openshift/src/doc/pvc @@ -59,6 +59,18 @@ options: - ReadOnlyMany - ReadWriteMany aliases: [] + storage_class_name: + description: + - The storage class name for the PVC + required: false + default: None + aliases: [] + selector: + description: + - A hash of key/values for the matchLabels + required: false + default: None + aliases: [] author: - "Kenny Woodson <kwoodson@redhat.com>" extends_documentation_fragment: [] diff --git a/roles/lib_openshift/src/lib/pvc.py b/roles/lib_openshift/src/lib/pvc.py index 929b50990..d1e935c32 100644 --- a/roles/lib_openshift/src/lib/pvc.py +++ b/roles/lib_openshift/src/lib/pvc.py @@ -11,7 +11,9 @@ class PersistentVolumeClaimConfig(object): namespace, kubeconfig, access_modes=None, - vol_capacity='1G'): + vol_capacity='1G', + selector=None, + storage_class_name=None): ''' constructor for handling pvc options ''' self.kubeconfig = kubeconfig self.name = sname @@ -19,6 +21,8 @@ class PersistentVolumeClaimConfig(object): self.access_modes = access_modes self.vol_capacity = vol_capacity self.data = {} + self.selector = selector + self.storage_class_name = storage_class_name self.create_dict() @@ -36,12 +40,16 @@ class PersistentVolumeClaimConfig(object): self.data['spec']['accessModes'] = ['ReadWriteOnce'] if self.access_modes: self.data['spec']['accessModes'] = self.access_modes + if self.selector: + self.data['spec']['selector'] = {'matchLabels': self.selector} # storage capacity self.data['spec']['resources'] = {} self.data['spec']['resources']['requests'] = {} self.data['spec']['resources']['requests']['storage'] = self.vol_capacity + if self.storage_class_name: + self.data['spec']['storageClassName'] = self.storage_class_name # pylint: disable=too-many-instance-attributes,too-many-public-methods class PersistentVolumeClaim(Yedit): @@ -51,13 +59,29 @@ class PersistentVolumeClaim(Yedit): volume_name_path = "spec.volumeName" bound_path = "status.phase" kind = 'PersistentVolumeClaim' + selector_path = "spec.selector.matchLabels" + storage_class_name_path = "spec.storageClassName" def __init__(self, content): - '''RoleBinding constructor''' + '''PersistentVolumeClaim constructor''' super(PersistentVolumeClaim, self).__init__(content=content) self._access_modes = None self._volume_capacity = None self._volume_name = None + self._selector = None + self._storage_class_name = None + + @property + def storage_class_name(self): + ''' storage_class_name property ''' + if self._storage_class_name is None: + self._storage_class_name = self.get_storage_class_name() + return self._storage_class_name + + @storage_class_name.setter + def storage_class_name(self, data): + ''' storage_class_name property 
setter''' + self._storage_class_name = data @property def volume_name(self): @@ -72,6 +96,24 @@ class PersistentVolumeClaim(Yedit): self._volume_name = data @property + def selector(self): + ''' selector property ''' + if self._selector is None: + self._selector = self.get_selector() + if not isinstance(self._selector, dict): + self._selector = dict(self._selector) + + return self._selector + + @selector.setter + def selector(self, data): + ''' selector property setter''' + if not isinstance(data, dict): + data = dict(data) + + self._selector = data + + @property def access_modes(self): ''' access_modes property ''' if self._access_modes is None: @@ -101,6 +143,14 @@ class PersistentVolumeClaim(Yedit): ''' volume_capacity property setter''' self._volume_capacity = data + def get_storage_class_name(self): + '''get storage_class_name''' + return self.get(PersistentVolumeClaim.storage_class_name_path) or [] + + def get_selector(self): + '''get selector''' + return self.get(PersistentVolumeClaim.selector_path) or [] + def get_access_modes(self): '''get access_modes''' return self.get(PersistentVolumeClaim.access_modes_path) or [] diff --git a/roles/lib_openshift/src/test/integration/oc_pvc.yml b/roles/lib_openshift/src/test/integration/oc_pvc.yml new file mode 100755 index 000000000..fb3a4781f --- /dev/null +++ b/roles/lib_openshift/src/test/integration/oc_pvc.yml @@ -0,0 +1,28 @@ +#!/usr/bin/ansible-playbook --module-path=../../../library/ +# ./oc_pvc.yml -e "cli_master_test=$OPENSHIFT_MASTER +--- +- hosts: "{{ cli_master_test }}" + gather_facts: no + user: root + tasks: + - name: create pvc + oc_pvc: + state: present + name: oc-pvc-create-test + namespace: default + volume_capacity: 3G + access_modes: + - ReadWriteOnce + selector: + foo: bar + storage_class_name: my-storage-class-name + register: pvcout + - debug: var=pvcout + + - assert: + that: + - pvcout.results.results[0]['metadata']['name'] == 'oc-pvc-create-test' + - pvcout.results.results[0]['spec']['storageClassName'] == 'my-storage-class-name' + - pvcout.results.results[0]['spec']['selector']['matchLabels']['foo'] == 'bar' + - pvcout.changed + msg: pvc create failed. 
diff --git a/roles/lib_openshift/src/test/unit/test_oc_pvc.py b/roles/lib_openshift/src/test/unit/test_oc_pvc.py index 82187917d..a96f2e4a7 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_pvc.py +++ b/roles/lib_openshift/src/test/unit/test_oc_pvc.py @@ -30,6 +30,8 @@ class OCPVCTest(unittest.TestCase): 'name': 'mypvc', 'namespace': 'test', 'volume_capacity': '1G', + 'selector': {'foo': 'bar', 'abc': 'a123'}, + 'storage_class_name': 'mystorage', 'access_modes': 'ReadWriteMany'} @mock.patch('oc_pvc.Utils.create_tmpfile_copy') @@ -65,6 +67,13 @@ class OCPVCTest(unittest.TestCase): "storage": "1Gi" } }, + "selector": { + "matchLabels": { + "foo": "bar", + "abc": "a123" + } + }, + "storageClassName": "myStorage", "volumeName": "pv-aws-ow5vl" }, "status": { @@ -93,6 +102,8 @@ class OCPVCTest(unittest.TestCase): self.assertTrue(results['changed']) self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mypvc') + self.assertEqual(results['results']['results'][0]['spec']['storageClassName'], 'myStorage') + self.assertEqual(results['results']['results'][0]['spec']['selector']['matchLabels']['foo'], 'bar') @mock.patch('oc_pvc.Utils.create_tmpfile_copy') @mock.patch('oc_pvc.OCPVC._run') diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 49cc51b48..42c4945b4 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -2222,14 +2222,10 @@ class OpenShiftFacts(object): product_version = self.system_facts['ansible_product_version'] virt_type = self.system_facts['ansible_virtualization_type'] virt_role = self.system_facts['ansible_virtualization_role'] + bios_vendor = self.system_facts['ansible_system_vendor'] provider = None metadata = None - # TODO: this is not exposed through module_utils/facts.py in ansible, - # need to create PR for ansible to expose it - bios_vendor = get_file_content( # noqa: F405 - '/sys/devices/virtual/dmi/id/bios_vendor' - ) if bios_vendor == 'Google': provider = 'gce' metadata_url = ('http://metadata.google.internal/' diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index d2ef7cc71..97650e2ce 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -57,6 +57,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all']. - `openshift_logging_fluentd_buffer_queue_limit`: Buffer queue limit for Fluentd. Defaults to 1024. - `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m. +- `openshift_logging_fluentd_file_buffer_limit`: Fluentd will set the value to the file buffer limit. Defaults to '1Gi' per destination. - `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'. @@ -160,3 +161,18 @@ Elasticsearch OPS too, if using an OPS cluster: need to set this - `openshift_logging_mux_buffer_queue_limit`: Default `[1024]` - Buffer queue limit for Mux. - `openshift_logging_mux_buffer_size_limit`: Default `[1m]` - Buffer chunk limit for Mux. +- `openshift_logging_mux_file_buffer_limit`: Default `[2Gi]` per destination - Mux will + set the value to the file buffer limit. +- `openshift_logging_mux_file_buffer_storage_type`: Default `[emptydir]` - Storage + type for the file buffer. 
One of [`emptydir`, `pvc`, `hostmount`] + +- `openshift_logging_mux_file_buffer_pvc_size`: The requested size for the file buffer + PVC, when not provided the role will not generate any PVCs. Defaults to `4Gi`. +- `openshift_logging_mux_file_buffer_pvc_dynamic`: Whether or not to add the dynamic + PVC annotation for any generated PVCs. Defaults to 'False'. +- `openshift_logging_mux_file_buffer_pvc_pv_selector`: A key/value map added to a PVC + in order to select specific PVs. Defaults to 'None'. +- `openshift_logging_mux_file_buffer_pvc_prefix`: The prefix for the generated PVCs. + Defaults to 'logging-mux'. +- `openshift_logging_mux_file_buffer_storage_group`: The storage group used for Mux. + Defaults to '65534'. diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index ce7cfc433..be9943b0d 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -57,3 +57,5 @@ openshift_logging_fluentd_es_copy: false #fluentd_config_contents: #fluentd_throttle_contents: #fluentd_secureforward_contents: + +openshift_logging_fluentd_file_buffer_limit: 1Gi diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index 970e5c2a5..a4cf9a149 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -62,6 +62,8 @@ spec: - name: dockerdaemoncfg mountPath: /etc/docker readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd {% if openshift_logging_use_mux_client | bool %} - name: muxcerts mountPath: /etc/fluent/muxkeys @@ -112,6 +114,8 @@ spec: resource: limits.memory - name: "USE_MUX_CLIENT" value: "{{ openshift_logging_use_mux_client | default('false') | lower }}" + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}" volumes: - name: runlogjournal hostPath: @@ -145,3 +149,6 @@ spec: secret: secretName: logging-mux {% endif %} + - name: filebufferstorage + hostPath: + path: "/var/lib/fluentd" diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index 797a27c1b..35fc7146f 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -47,3 +47,20 @@ openshift_logging_mux_ops_ca: /etc/fluent/keys/ca #mux_config_contents: #mux_throttle_contents: #mux_secureforward_contents: + +# One of ['emptydir', 'pvc', 'hostmount'] +openshift_logging_mux_file_buffer_storage_type: "emptydir" + +# pvc options +# the name of the PVC we will bind to -- create it if it does not exist +openshift_logging_mux_file_buffer_pvc_name: "logging-mux-pvc" + +# required if the PVC does not already exist +openshift_logging_mux_file_buffer_pvc_size: 4Gi +openshift_logging_mux_file_buffer_pvc_dynamic: false +openshift_logging_mux_file_buffer_pvc_pv_selector: {} +openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce'] +openshift_logging_mux_file_buffer_storage_group: '65534' + +openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux" +openshift_logging_mux_file_buffer_limit: 2Gi diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 02815806a..8ec93de7d 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -177,6 +177,18 @@ check_mode: no changed_when: no +- name: Create Mux PVC + oc_pvc: + 
state: present + name: "{{ openshift_logging_mux_file_buffer_pvc_name }}" + namespace: "{{ openshift_logging_mux_namespace }}" + volume_capacity: "{{ openshift_logging_mux_file_buffer_pvc_size }}" + access_modes: "{{ openshift_logging_mux_file_buffer_pvc_access_modes | list }}" + selector: "{{ openshift_logging_mux_file_buffer_pvc_pv_selector }}" + storage_class_name: "{{ openshift_logging_mux_file_buffer_pvc_storage_class_name | default('', true) }}" + when: + - openshift_logging_mux_file_buffer_storage_type == "pvc" + - name: Set logging-mux DC oc_obj: state: present diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2 index 2b3b64bb8..e43d9d397 100644 --- a/roles/openshift_logging_mux/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/mux.j2 @@ -66,6 +66,8 @@ spec: - name: muxcerts mountPath: /etc/fluent/muxkeys readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd env: - name: "K8S_HOST_URL" value: "{{openshift_logging_mux_master_url}}" @@ -115,6 +117,8 @@ spec: resourceFieldRef: containerName: "mux" resource: limits.memory + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}" volumes: - name: config configMap: @@ -131,3 +135,13 @@ spec: - name: muxcerts secret: secretName: logging-mux + - name: filebufferstorage +{% if openshift_logging_mux_file_buffer_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_logging_mux_file_buffer_pvc_name }} +{% elif openshift_logging_mux_file_buffer_storage_type == 'hostmount' %} + hostPath: + path: "/var/log/fluentd" +{% else %} + emptydir: {} +{% endif %} diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 0c4ee319c..1f182a25c 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -133,12 +133,12 @@ - block: - name: check whether our docker-registry setting exists in the env file command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master" - ignore_errors: true + failed_when: false changed_when: false register: already_set - set_fact: - openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" + openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout is defined and already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" - name: Set fact of all etcd host IPs openshift_facts: diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index af3ebc6d2..7964bbb48 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -164,16 +164,16 @@ masterClients: externalKubernetesClientConnectionOverrides: acceptContentTypes: application/vnd.kubernetes.protobuf,application/json contentType: application/vnd.kubernetes.protobuf - burst: 400 - qps: 200 + burst: {{ openshift_master_external_ratelimit_burst | default(400) }} + qps: {{ openshift_master_external_ratelimit_qps | default(200) }} {% endif %} externalKubernetesKubeConfig: "" {% if openshift.common.version_gte_3_3_or_1_3 | bool %} openshiftLoopbackClientConnectionOverrides: acceptContentTypes: 
application/vnd.kubernetes.protobuf,application/json contentType: application/vnd.kubernetes.protobuf - burst: 600 - qps: 300 + burst: {{ openshift_master_loopback_ratelimit_burst | default(600) }} + qps: {{ openshift_master_loopback_ratelimit_qps | default(300) }} {% endif %} openshiftLoopbackKubeConfig: openshift-master.kubeconfig masterPublicURL: {{ openshift.master.public_api_url }} diff --git a/roles/openshift_metrics/tasks/generate_rolebindings.yaml b/roles/openshift_metrics/tasks/generate_rolebindings.yaml index 1304ab8b5..9882b1eb5 100644 --- a/roles/openshift_metrics/tasks/generate_rolebindings.yaml +++ b/roles/openshift_metrics/tasks/generate_rolebindings.yaml @@ -37,3 +37,12 @@ src: hawkular_metrics_role.j2 dest: "{{ mktemp.stdout }}/templates/hawkular-cluster-role.yaml" changed_when: no + +- name: Set hawkular cluster roles + oc_obj: + name: hawkular-metrics + namespace: "{{ openshift_metrics_hawkular_agent_namespace }}" + kind: clusterrole + files: + - "{{ mktemp.stdout }}/templates/hawkular-cluster-role.yaml" + delete_after: true diff --git a/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml b/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml index e9d70f74f..db27680fe 100644 --- a/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml +++ b/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml @@ -13,3 +13,15 @@ - name: cassandra secret: hawkular-cassandra-secrets changed_when: no + +- name: Set serviceaccounts for hawkular metrics/cassandra + oc_obj: + name: "{{ item }}" + kind: serviceaccount + namespace: "{{ openshift_metrics_hawkular_agent_namespace }}" + files: + - "{{ mktemp.stdout }}/templates/metrics-{{ item }}-sa.yaml" + delete_after: true + with_items: + - hawkular + - cassandra diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index e19d82ddc..4fb841add 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -12,6 +12,7 @@ galaxy_info: categories: - cloud dependencies: +- role: openshift_node_facts - role: lib_openshift - role: openshift_common - role: openshift_clock diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml new file mode 100644 index 000000000..1186062eb --- /dev/null +++ b/roles/openshift_node/tasks/config/configure-node-settings.yml @@ -0,0 +1,16 @@ +--- +- name: Configure Node settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + create: true + with_items: + - regex: '^OPTIONS=' + line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}" + - regex: '^CONFIG_FILE=' + line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml" + - regex: '^IMAGE_VERSION=' + line: "IMAGE_VERSION={{ openshift_image_tag }}" + notify: + - restart node diff --git a/roles/openshift_node/tasks/config/configure-proxy-settings.yml b/roles/openshift_node/tasks/config/configure-proxy-settings.yml new file mode 100644 index 000000000..d60794305 --- /dev/null +++ b/roles/openshift_node/tasks/config/configure-proxy-settings.yml @@ -0,0 +1,17 @@ +--- +- name: Configure Proxy Settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + create: true + with_items: + - regex: '^HTTP_PROXY=' + line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}" + - regex: '^HTTPS_PROXY=' + line: 
"HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}" + - regex: '^NO_PROXY=' + line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}" + when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '') + notify: + - restart node diff --git a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml new file mode 100644 index 000000000..ee91a88ab --- /dev/null +++ b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml @@ -0,0 +1,8 @@ +--- +- name: Install Node dependencies docker service file + template: + dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service" + src: openshift.docker.node.dep.service + notify: + - reload systemd units + - restart node diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml new file mode 100644 index 000000000..f92ff79b5 --- /dev/null +++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml @@ -0,0 +1,8 @@ +--- +- name: Install Node docker service file + template: + dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" + src: openshift.docker.node.service + notify: + - reload systemd units + - restart node diff --git a/roles/openshift_node/tasks/config/install-ovs-docker-service-file.yml b/roles/openshift_node/tasks/config/install-ovs-docker-service-file.yml new file mode 100644 index 000000000..c2c5ea1d4 --- /dev/null +++ b/roles/openshift_node/tasks/config/install-ovs-docker-service-file.yml @@ -0,0 +1,8 @@ +--- +- name: Install OpenvSwitch docker service file + template: + dest: "/etc/systemd/system/openvswitch.service" + src: openvswitch.docker.service + notify: + - reload systemd units + - restart openvswitch diff --git a/roles/openshift_node/tasks/config/install-ovs-service-env-file.yml b/roles/openshift_node/tasks/config/install-ovs-service-env-file.yml new file mode 100644 index 000000000..1d75a3355 --- /dev/null +++ b/roles/openshift_node/tasks/config/install-ovs-service-env-file.yml @@ -0,0 +1,8 @@ +--- +- name: Create the openvswitch service env file + template: + src: openvswitch.sysconfig.j2 + dest: /etc/sysconfig/openvswitch + notify: + - reload systemd units + - restart openvswitch diff --git a/roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml b/roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml new file mode 100644 index 000000000..5df1abc79 --- /dev/null +++ b/roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml @@ -0,0 +1,13 @@ +--- +# May be a temporary workaround. 
+# https://bugzilla.redhat.com/show_bug.cgi?id=1331590 +- name: Create OpenvSwitch service.d directory + file: path=/etc/systemd/system/openvswitch.service.d/ state=directory + +- name: Install OpenvSwitch service OOM fix + template: + dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf" + src: openvswitch-avoid-oom.conf + notify: + - reload systemd units + - restart openvswitch diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 8b4931e7c..87b1f6537 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -6,34 +6,6 @@ (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise'] -- name: Set node facts - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - # Reset node labels to an empty dictionary. - - role: node - local_facts: - labels: {} - - role: node - local_facts: - annotations: "{{ openshift_node_annotations | default(none) }}" - debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" - iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" - kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" - labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" - registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" - schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" - sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" - set_node_ip: "{{ openshift_set_node_ip | default(None) }}" - node_image: "{{ osn_image | default(None) }}" - ovs_image: "{{ osn_ovs_image | default(None) }}" - proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" - local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" - dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" - env_vars: "{{ openshift_node_env_vars | default(None) }}" - # https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory - name: Check for swap usage command: grep "^[^#].*swap" /etc/fstab diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 2ccc28461..b86bb1549 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -2,15 +2,8 @@ # This file is included both in the openshift_master role and in the upgrade # playbooks. 
-- name: Install Node dependencies docker service file
-  template:
-    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
-    src: openshift.docker.node.dep.service
-  register: install_node_dep_result
+- include: config/install-node-deps-docker-service-file.yml
   when: openshift.common.is_containerized | bool
-  notify:
-  - reload systemd units
-  - restart node

 - block:
   - name: Pre-pull node image
@@ -19,14 +12,7 @@
     register: pull_result
     changed_when: "'Downloaded newer image' in pull_result.stdout"

-  - name: Install Node docker service file
-    template:
-      dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
-      src: openshift.docker.node.service
-    register: install_node_result
-    notify:
-    - reload systemd units
-    - restart node
+  - include: config/install-node-docker-service-file.yml
   when:
   - openshift.common.is_containerized | bool
   - not openshift.common.is_node_system_container | bool
@@ -35,21 +21,13 @@
   template:
     dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
     src: "node.service.j2"
-  register: install_node_result
   when: not openshift.common.is_containerized | bool
   notify:
   - reload systemd units
   - restart node

-- name: Create the openvswitch service env file
-  template:
-    src: openvswitch.sysconfig.j2
-    dest: /etc/sysconfig/openvswitch
+- include: config/install-ovs-service-env-file.yml
   when: openshift.common.is_containerized | bool
-  register: install_ovs_sysconfig
-  notify:
-  - reload systemd units
-  - restart openvswitch

 - name: Install Node system container
   include: node_system_container.yml
@@ -64,22 +42,9 @@
   - openshift.common.is_containerized | bool
   - openshift.common.is_openvswitch_system_container | bool

-# May be a temporary workaround.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
-- name: Create OpenvSwitch service.d directory
-  file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
+- include: config/workaround-bz1331590-ovs-oom-fix.yml
   when: openshift.common.use_openshift_sdn | default(true) | bool

-- name: Install OpenvSwitch service OOM fix
-  template:
-    dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
-    src: openvswitch-avoid-oom.conf
-  when: openshift.common.use_openshift_sdn | default(true) | bool
-  register: install_oom_fix_result
-  notify:
-  - reload systemd units
-  - restart openvswitch
-
 - block:
   - name: Pre-pull openvswitch image
     command: >
@@ -87,47 +52,11 @@
     register: pull_result
     changed_when: "'Downloaded newer image' in pull_result.stdout"

-  - name: Install OpenvSwitch docker service file
-    template:
-      dest: "/etc/systemd/system/openvswitch.service"
-      src: openvswitch.docker.service
-    notify:
-    - reload systemd units
-    - restart openvswitch
+  - include: config/install-ovs-docker-service-file.yml
   when:
   - openshift.common.is_containerized | bool
   - openshift.common.use_openshift_sdn | default(true) | bool
   - not openshift.common.is_openvswitch_system_container | bool

-- name: Configure Node settings
-  lineinfile:
-    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-    create: true
-  with_items:
-  - regex: '^OPTIONS='
-    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
-  - regex: '^CONFIG_FILE='
-    line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
-  - regex: '^IMAGE_VERSION='
-    line: "IMAGE_VERSION={{ openshift_image_tag }}"
-  notify:
-  - restart node
-
-- name: Configure Proxy Settings
-  lineinfile:
-    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-    create: true
-  with_items:
-  - regex: '^HTTP_PROXY='
-    line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
-  - regex: '^HTTPS_PROXY='
-    line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
-  - regex: '^NO_PROXY='
-    line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
-  when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
-  notify:
-  - restart node
+- include: config/configure-node-settings.yml
+- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node_facts/meta/main.yml b/roles/openshift_node_facts/meta/main.yml
new file mode 100644
index 000000000..59bf680ce
--- /dev/null
+++ b/roles/openshift_node_facts/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: Andrew Butcher
+  description: OpenShift Node Facts
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.9
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
new file mode 100644
index 000000000..c268c945e
--- /dev/null
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+- set_fact:
+    openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
+  when:
+  - openshift_node_debug_level is not defined
+  - lookup('oo_option', 'openshift_node_debug_level') != ""
+
+- name: Set node facts
+  openshift_facts:
+    role: "{{ item.role }}"
+    local_facts: "{{ item.local_facts }}"
+  with_items:
+    # Reset node labels to an empty dictionary.
+    - role: node
+      local_facts:
+        labels: {}
+    - role: node
+      local_facts:
+        annotations: "{{ openshift_node_annotations | default(none) }}"
+        debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
+        iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
+        kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
+        labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+        registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
+        schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+        storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
+        set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
+        node_image: "{{ osn_image | default(None) }}"
+        ovs_image: "{{ osn_ovs_image | default(None) }}"
+        proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
+        local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
+        dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
+        env_vars: "{{ openshift_node_env_vars | default(None) }}"
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
new file mode 100644
index 000000000..1186062eb
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
@@ -0,0 +1,16 @@
+---
+- name: Configure Node settings
+  lineinfile:
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
+    create: true
+  with_items:
+  - regex: '^OPTIONS='
+    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+  - regex: '^CONFIG_FILE='
+    line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
+  - regex: '^IMAGE_VERSION='
+    line: "IMAGE_VERSION={{ openshift_image_tag }}"
+  notify:
+  - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
new file mode 100644
index 000000000..d60794305
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
@@ -0,0 +1,17 @@
+---
+- name: Configure Proxy Settings
+  lineinfile:
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
+    create: true
+  with_items:
+  - regex: '^HTTP_PROXY='
+    line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
+  - regex: '^HTTPS_PROXY='
+    line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
+  - regex: '^NO_PROXY='
+    line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+  when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
+  notify:
+  - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
new file mode 100644
index 000000000..ee91a88ab
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install Node dependencies docker service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+    src: openshift.docker.node.dep.service
+  notify:
+  - reload systemd units
+  - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml
new file mode 100644
index 000000000..f92ff79b5
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install Node docker service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+    src: openshift.docker.node.service
+  notify:
+  - reload systemd units
+  - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
new file mode 100644
index 000000000..c2c5ea1d4
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install OpenvSwitch docker service file
+  template:
+    dest: "/etc/systemd/system/openvswitch.service"
+    src: openvswitch.docker.service
+  notify:
+  - reload systemd units
+  - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
new file mode 100644
index 000000000..1d75a3355
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Create the openvswitch service env file
+  template:
+    src: openvswitch.sysconfig.j2
+    dest: /etc/sysconfig/openvswitch
+  notify:
+  - reload systemd units
+  - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml b/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
new file mode 100644
index 000000000..5df1abc79
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
@@ -0,0 +1,13 @@
+---
+# May be a temporary workaround.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
+- name: Create OpenvSwitch service.d directory
+  file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
+
+- name: Install OpenvSwitch service OOM fix
+  template:
+    dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
+    src: openvswitch-avoid-oom.conf
+  notify:
+  - reload systemd units
+  - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index 9b3805eea..4e9550150 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -18,86 +18,20 @@
 # This file is included both in the openshift_master role and in the upgrade
 # playbooks.
-- name: Install Node dependencies docker service file
-  template:
-    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
-    src: openshift.docker.node.dep.service
+- include: config/install-node-deps-docker-service-file.yml
   when: openshift.common.is_containerized | bool
-  notify:
-  - reload systemd units
-  - restart node

-- name: Install Node docker service file
-  template:
-    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
-    src: openshift.docker.node.service
+- include: config/install-node-docker-service-file.yml
   when: openshift.common.is_containerized | bool
-  notify:
-  - reload systemd units
-  - restart node

-- name: Create the openvswitch service env file
-  template:
-    src: openvswitch.sysconfig.j2
-    dest: /etc/sysconfig/openvswitch
+- include: config/install-ovs-service-env-file.yml
   when: openshift.common.is_containerized | bool
-  notify:
-  - reload systemd units
-  - restart openvswitch

-# May be a temporary workaround.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
-- name: Create OpenvSwitch service.d directory
-  file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
+- include: config/workaround-bz1331590-ovs-oom-fix.yml
   when: openshift.common.use_openshift_sdn | default(true) | bool

-- name: Install OpenvSwitch service OOM fix
-  template:
-    dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
-    src: openvswitch-avoid-oom.conf
-  when: openshift.common.use_openshift_sdn | default(true) | bool
-  notify:
-  - reload systemd units
-  - restart openvswitch
-
-- name: Install OpenvSwitch docker service file
-  template:
-    dest: "/etc/systemd/system/openvswitch.service"
-    src: openvswitch.docker.service
+- include: config/install-ovs-docker-service-file.yml
   when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool
-  notify:
-  - reload systemd units
-  - restart openvswitch
-
-- name: Configure Node settings
-  lineinfile:
-    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-    create: true
-  with_items:
-  - regex: '^OPTIONS='
-    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
-  - regex: '^CONFIG_FILE='
-    line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
-  - regex: '^IMAGE_VERSION='
-    line: "IMAGE_VERSION={{ openshift_image_tag }}"
-  notify:
-  - restart node
-
-- name: Configure Proxy Settings
-  lineinfile:
-    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-    create: true
-  with_items:
-  - regex: '^HTTP_PROXY='
-    line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
-  - regex: '^HTTPS_PROXY='
-    line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
-  - regex: '^NO_PROXY='
-    line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
-  when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
-  notify:
-  - restart node
+- include: config/configure-node-settings.yml
+- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index a846889ca..2823a7610 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -1,6 +1,5 @@
 ---
 openshift_storage_glusterfs_timeout: 300
-openshift_storage_glusterfs_namespace: 'glusterfs'
 openshift_storage_glusterfs_is_native: True
 openshift_storage_glusterfs_name: 'storage'
 openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
@@ -25,6 +24,7 @@
 openshift_storage_glusterfs_heketi_ssh_port: 22
 openshift_storage_glusterfs_heketi_ssh_user: 'root'
 openshift_storage_glusterfs_heketi_ssh_sudo: False
 openshift_storage_glusterfs_heketi_ssh_keyfile: '/dev/null'
+openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
 openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
 openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
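
A quick way to sanity-check the new openshift_storage_glusterfs_namespace default is to evaluate the same Jinja expression in a throwaway play. The playbook below is only an illustrative sketch and not part of this change; localhost, the variable values, and the debug task are assumptions. With both *_is_native switches off it resolves to the 'default' project, matching the non-native fallback added above.

---
# Illustrative only -- not part of this diff. Re-evaluates the default from
# roles/openshift_storage_glusterfs/defaults/main.yml for a non-native deployment.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_storage_glusterfs_is_native: False
    openshift_storage_glusterfs_heketi_is_native: False
    openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
  tasks:
  - debug:
      msg: "GlusterFS objects will be created in the {{ openshift_storage_glusterfs_namespace }} project"

Similarly, the new openshift_node_facts role introduced earlier in this diff only needs to be listed in a play to set the node facts that were previously handled inline in openshift_node, since its meta/main.yml pulls in openshift_facts as a dependency. The sketch below is hypothetical; the 'nodes' host group and the label value are assumptions.

---
# Illustrative only -- listing the role is enough, openshift_facts is pulled
# in automatically via the role's meta dependency.
- hosts: nodes
  roles:
  - role: openshift_node_facts
    openshift_node_labels:
      region: primary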