Diffstat (limited to 'roles')
49 files changed, 452 insertions, 173 deletions
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml index 2d0fb95ae..07bb16ea7 100644 --- a/roles/contiv/tasks/netmaster_iptables.yml +++ b/roles/contiv/tasks/netmaster_iptables.yml @@ -2,7 +2,7 @@ - name: Netmaster IPtables | Get iptables rules command: iptables -L --wait register: iptablesrules - always_run: yes + check_mode: no - name: Netmaster IPtables | Enable iptables at boot service: diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml index 184c595c5..3ea34645d 100644 --- a/roles/contiv/tasks/netplugin_iptables.yml +++ b/roles/contiv/tasks/netplugin_iptables.yml @@ -2,7 +2,7 @@ - name: Netplugin IPtables | Get iptables rules command: iptables -L --wait register: iptablesrules - always_run: yes + check_mode: no - name: Netplugin IPtables | Enable iptables at boot service: diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml index 926e0e0be..7a4972fca 100644 --- a/roles/contiv_facts/tasks/main.yml +++ b/roles/contiv_facts/tasks/main.yml @@ -3,7 +3,7 @@ stat: path=/run/ostree-booted register: s changed_when: false - always_run: yes + check_mode: no - name: Init the is_atomic fact set_fact: @@ -17,7 +17,7 @@ - name: Determine if CoreOS raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'" register: distro - always_run: yes + check_mode: no - name: Init the is_coreos fact set_fact: @@ -61,7 +61,7 @@ stat: path=/usr/bin/rpm register: s changed_when: false - always_run: yes + check_mode: no - name: Init the has_rpm fact set_fact: diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml index d2f66dac5..07401a6dd 100644 --- a/roles/contiv_facts/tasks/rpm.yml +++ b/roles/contiv_facts/tasks/rpm.yml @@ -4,7 +4,7 @@ register: s changed_when: false failed_when: false - always_run: yes + check_mode: no - name: Set the has_firewalld fact set_fact: @@ -16,7 +16,7 @@ register: s changed_when: false failed_when: false - always_run: yes + check_mode: no - name: Set the has_iptables fact set_fact: diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml index 800073873..b66696b55 100644 --- a/roles/etcd_migrate/tasks/check.yml +++ b/roles/etcd_migrate/tasks/check.yml @@ -1,4 +1,8 @@ --- +- fail: + msg: "Currently etcd v3 migration is unsupported while we test it more thoroughly" + when: not openshift_enable_unsupported_configurations | default(false) | bool + # Check the cluster is healthy - include: check_cluster_health.yml diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 221ef5094..7154fd839 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1959,28 +1959,28 @@ class PolicyGroup(OpenShiftCLI): self.verbose = verbose self._rolebinding = None self._scc = None - self._cluster_policy_bindings = None - self._policy_bindings = None + self._cluster_role_bindings = None + self._role_bindings = None @property - def policybindings(self): - if self._policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def rolebindings(self): + if self._role_bindings is None: + results = self._get('rolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve policybindings') - self._policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve rolebindings') + self._role_bindings = 
results['results'][0]['items'] - return self._policy_bindings + return self._role_bindings @property - def clusterpolicybindings(self): - if self._cluster_policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def clusterrolebindings(self): + if self._cluster_role_bindings is None: + results = self._get('clusterrolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') - self._cluster_policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve clusterrolebindings') + self._cluster_role_bindings = results['results'][0]['items'] - return self._cluster_policy_bindings + return self._cluster_role_bindings @property def role_binding(self): @@ -2023,18 +2023,17 @@ class PolicyGroup(OpenShiftCLI): ''' return whether role_binding exists ''' bindings = None if self.config.config_options['resource_kind']['value'] == 'cluster-role': - bindings = self.clusterpolicybindings + bindings = self.clusterrolebindings else: - bindings = self.policybindings + bindings = self.rolebindings if bindings is None: return False - for binding in bindings['roleBindings']: - _rb = binding['roleBinding'] - if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ - _rb['groupNames'] is not None and \ - self.config.config_options['group']['value'] in _rb['groupNames']: + for binding in bindings: + if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \ + binding['groupNames'] is not None and \ + self.config.config_options['group']['value'] in binding['groupNames']: self.role_binding = binding return True diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 071562875..3fcf49799 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1950,36 +1950,36 @@ class PolicyUser(OpenShiftCLI): ''' Class to handle attaching policies to users ''' def __init__(self, - policy_config, + config, verbose=False): ''' Constructor for PolicyUser ''' - super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose) - self.config = policy_config + super(PolicyUser, self).__init__(config.namespace, config.kubeconfig, verbose) + self.config = config self.verbose = verbose self._rolebinding = None self._scc = None - self._cluster_policy_bindings = None - self._policy_bindings = None + self._cluster_role_bindings = None + self._role_bindings = None @property - def policybindings(self): - if self._policy_bindings is None: - results = self._get('policybindings', None) + def rolebindings(self): + if self._role_bindings is None: + results = self._get('rolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve policybindings') - self._policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve rolebindings') + self._role_bindings = results['results'][0]['items'] - return self._policy_bindings + return self._role_bindings @property - def clusterpolicybindings(self): - if self._cluster_policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def clusterrolebindings(self): + if self._cluster_role_bindings is None: + results = self._get('clusterrolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') - self._cluster_policy_bindings = 
results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve clusterrolebindings') + self._cluster_role_bindings = results['results'][0]['items'] - return self._cluster_policy_bindings + return self._cluster_role_bindings @property def role_binding(self): @@ -2017,18 +2017,17 @@ class PolicyUser(OpenShiftCLI): ''' return whether role_binding exists ''' bindings = None if self.config.config_options['resource_kind']['value'] == 'cluster-role': - bindings = self.clusterpolicybindings + bindings = self.clusterrolebindings else: - bindings = self.policybindings + bindings = self.rolebindings if bindings is None: return False - for binding in bindings['roleBindings']: - _rb = binding['roleBinding'] - if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ - _rb['userNames'] is not None and \ - self.config.config_options['user']['value'] in _rb['userNames']: + for binding in bindings: + if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \ + binding['userNames'] is not None and \ + self.config.config_options['user']['value'] in binding['userNames']: self.role_binding = binding return True diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index 289f08b83..d101eac1c 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -1671,7 +1671,7 @@ class OCClusterRole(OpenShiftCLI): self.clusterrole = ClusterRole(content=result['results'][0]) result['results'] = self.clusterrole.yaml_dict - elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']: + elif '"{}" not found'.format(self.name) in result['stderr']: result['returncode'] = 0 self.clusterrole = None diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index a88639bfc..a21540962 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -110,6 +110,18 @@ options: - ReadOnlyMany - ReadWriteMany aliases: [] + storage_class_name: + description: + - The storage class name for the PVC + required: false + default: None + aliases: [] + selector: + description: + - A hash of key/values for the matchLabels + required: false + default: None + aliases: [] author: - "Kenny Woodson <kwoodson@redhat.com>" extends_documentation_fragment: [] @@ -1420,7 +1432,9 @@ class PersistentVolumeClaimConfig(object): namespace, kubeconfig, access_modes=None, - vol_capacity='1G'): + vol_capacity='1G', + selector=None, + storage_class_name=None): ''' constructor for handling pvc options ''' self.kubeconfig = kubeconfig self.name = sname @@ -1428,6 +1442,8 @@ class PersistentVolumeClaimConfig(object): self.access_modes = access_modes self.vol_capacity = vol_capacity self.data = {} + self.selector = selector + self.storage_class_name = storage_class_name self.create_dict() @@ -1445,12 +1461,16 @@ class PersistentVolumeClaimConfig(object): self.data['spec']['accessModes'] = ['ReadWriteOnce'] if self.access_modes: self.data['spec']['accessModes'] = self.access_modes + if self.selector: + self.data['spec']['selector'] = {'matchLabels': self.selector} # storage capacity self.data['spec']['resources'] = {} self.data['spec']['resources']['requests'] = {} self.data['spec']['resources']['requests']['storage'] = self.vol_capacity + if self.storage_class_name: + self.data['spec']['storageClassName'] = self.storage_class_name # pylint: disable=too-many-instance-attributes,too-many-public-methods class 
PersistentVolumeClaim(Yedit): @@ -1460,13 +1480,29 @@ class PersistentVolumeClaim(Yedit): volume_name_path = "spec.volumeName" bound_path = "status.phase" kind = 'PersistentVolumeClaim' + selector_path = "spec.selector.matchLabels" + storage_class_name_path = "spec.storageClassName" def __init__(self, content): - '''RoleBinding constructor''' + '''PersistentVolumeClaim constructor''' super(PersistentVolumeClaim, self).__init__(content=content) self._access_modes = None self._volume_capacity = None self._volume_name = None + self._selector = None + self._storage_class_name = None + + @property + def storage_class_name(self): + ''' storage_class_name property ''' + if self._storage_class_name is None: + self._storage_class_name = self.get_storage_class_name() + return self._storage_class_name + + @storage_class_name.setter + def storage_class_name(self, data): + ''' storage_class_name property setter''' + self._storage_class_name = data @property def volume_name(self): @@ -1481,6 +1517,24 @@ class PersistentVolumeClaim(Yedit): self._volume_name = data @property + def selector(self): + ''' selector property ''' + if self._selector is None: + self._selector = self.get_selector() + if not isinstance(self._selector, dict): + self._selector = dict(self._selector) + + return self._selector + + @selector.setter + def selector(self, data): + ''' selector property setter''' + if not isinstance(data, dict): + data = dict(data) + + self._selector = data + + @property def access_modes(self): ''' access_modes property ''' if self._access_modes is None: @@ -1510,6 +1564,14 @@ class PersistentVolumeClaim(Yedit): ''' volume_capacity property setter''' self._volume_capacity = data + def get_storage_class_name(self): + '''get storage_class_name''' + return self.get(PersistentVolumeClaim.storage_class_name_path) or [] + + def get_selector(self): + '''get selector''' + return self.get(PersistentVolumeClaim.selector_path) or [] + def get_access_modes(self): '''get access_modes''' return self.get(PersistentVolumeClaim.access_modes_path) or [] @@ -1663,6 +1725,8 @@ class OCPVC(OpenShiftCLI): params['kubeconfig'], params['access_modes'], params['volume_capacity'], + params['selector'], + params['storage_class_name'], ) oc_pvc = OCPVC(pconfig, verbose=params['debug']) @@ -1763,9 +1827,9 @@ def main(): name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), volume_capacity=dict(default='1G', type='str'), - access_modes=dict(default='ReadWriteOnce', - choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'], - type='str'), + storage_class_name=dict(default=None, required=False, type='str'), + selector=dict(default=None, required=False, type='dict'), + access_modes=dict(default=['ReadWriteOnce'], type='list'), ), supports_check_mode=True, ) diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index d5375e27a..686119c65 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -1427,7 +1427,7 @@ class StorageClassConfig(object): # pylint: disable=too-many-arguments def __init__(self, name, - provisioner=None, + provisioner, parameters=None, annotations=None, default_storage_class="false", @@ -1459,10 +1459,7 @@ class StorageClassConfig(object): self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \ self.default_storage_class - if self.provisioner is None: - self.data['provisioner'] = 'kubernetes.io/aws-ebs' - else: - 
self.data['provisioner'] = self.provisioner + self.data['provisioner'] = self.provisioner self.data['parameters'] = {} if self.parameters is not None: @@ -1668,7 +1665,7 @@ def main(): name=dict(default=None, type='str'), annotations=dict(default=None, type='dict'), parameters=dict(default=None, type='dict'), - provisioner=dict(default='aws-ebs', type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']), + provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']), api_version=dict(default='v1', type='str'), default_storage_class=dict(default="false", type='str'), ), diff --git a/roles/lib_openshift/src/ansible/oc_pvc.py b/roles/lib_openshift/src/ansible/oc_pvc.py index a5181e281..c98d811d6 100644 --- a/roles/lib_openshift/src/ansible/oc_pvc.py +++ b/roles/lib_openshift/src/ansible/oc_pvc.py @@ -16,9 +16,9 @@ def main(): name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), volume_capacity=dict(default='1G', type='str'), - access_modes=dict(default='ReadWriteOnce', - choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'], - type='str'), + storage_class_name=dict(default=None, required=False, type='str'), + selector=dict(default=None, required=False, type='dict'), + access_modes=dict(default=['ReadWriteOnce'], type='list'), ), supports_check_mode=True, ) diff --git a/roles/lib_openshift/src/ansible/oc_storageclass.py b/roles/lib_openshift/src/ansible/oc_storageclass.py index 2bd8f18d5..e9f3ebbd3 100644 --- a/roles/lib_openshift/src/ansible/oc_storageclass.py +++ b/roles/lib_openshift/src/ansible/oc_storageclass.py @@ -14,7 +14,7 @@ def main(): name=dict(default=None, type='str'), annotations=dict(default=None, type='dict'), parameters=dict(default=None, type='dict'), - provisioner=dict(default='aws-ebs', type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']), + provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']), api_version=dict(default='v1', type='str'), default_storage_class=dict(default="false", type='str'), ), diff --git a/roles/lib_openshift/src/class/oc_adm_policy_group.py b/roles/lib_openshift/src/class/oc_adm_policy_group.py index 1e51913e0..6ad57bdce 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_group.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_group.py @@ -41,28 +41,28 @@ class PolicyGroup(OpenShiftCLI): self.verbose = verbose self._rolebinding = None self._scc = None - self._cluster_policy_bindings = None - self._policy_bindings = None + self._cluster_role_bindings = None + self._role_bindings = None @property - def policybindings(self): - if self._policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def rolebindings(self): + if self._role_bindings is None: + results = self._get('rolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve policybindings') - self._policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve rolebindings') + self._role_bindings = results['results'][0]['items'] - return self._policy_bindings + return self._role_bindings @property - def clusterpolicybindings(self): - if self._cluster_policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def clusterrolebindings(self): + if self._cluster_role_bindings is None: + results = self._get('clusterrolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve 
clusterpolicybindings') - self._cluster_policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve clusterrolebindings') + self._cluster_role_bindings = results['results'][0]['items'] - return self._cluster_policy_bindings + return self._cluster_role_bindings @property def role_binding(self): @@ -105,18 +105,17 @@ class PolicyGroup(OpenShiftCLI): ''' return whether role_binding exists ''' bindings = None if self.config.config_options['resource_kind']['value'] == 'cluster-role': - bindings = self.clusterpolicybindings + bindings = self.clusterrolebindings else: - bindings = self.policybindings + bindings = self.rolebindings if bindings is None: return False - for binding in bindings['roleBindings']: - _rb = binding['roleBinding'] - if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ - _rb['groupNames'] is not None and \ - self.config.config_options['group']['value'] in _rb['groupNames']: + for binding in bindings: + if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \ + binding['groupNames'] is not None and \ + self.config.config_options['group']['value'] in binding['groupNames']: self.role_binding = binding return True diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py index 37a685ebb..6fc8145c8 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_user.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py @@ -32,36 +32,36 @@ class PolicyUser(OpenShiftCLI): ''' Class to handle attaching policies to users ''' def __init__(self, - policy_config, + config, verbose=False): ''' Constructor for PolicyUser ''' - super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose) - self.config = policy_config + super(PolicyUser, self).__init__(config.namespace, config.kubeconfig, verbose) + self.config = config self.verbose = verbose self._rolebinding = None self._scc = None - self._cluster_policy_bindings = None - self._policy_bindings = None + self._cluster_role_bindings = None + self._role_bindings = None @property - def policybindings(self): - if self._policy_bindings is None: - results = self._get('policybindings', None) + def rolebindings(self): + if self._role_bindings is None: + results = self._get('rolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve policybindings') - self._policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve rolebindings') + self._role_bindings = results['results'][0]['items'] - return self._policy_bindings + return self._role_bindings @property - def clusterpolicybindings(self): - if self._cluster_policy_bindings is None: - results = self._get('clusterpolicybindings', None) + def clusterrolebindings(self): + if self._cluster_role_bindings is None: + results = self._get('clusterrolebindings', None) if results['returncode'] != 0: - raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') - self._cluster_policy_bindings = results['results'][0]['items'][0] + raise OpenShiftCLIError('Could not retrieve clusterrolebindings') + self._cluster_role_bindings = results['results'][0]['items'] - return self._cluster_policy_bindings + return self._cluster_role_bindings @property def role_binding(self): @@ -99,18 +99,17 @@ class PolicyUser(OpenShiftCLI): ''' return whether role_binding exists ''' bindings = None if self.config.config_options['resource_kind']['value'] == 'cluster-role': - 
bindings = self.clusterpolicybindings + bindings = self.clusterrolebindings else: - bindings = self.policybindings + bindings = self.rolebindings if bindings is None: return False - for binding in bindings['roleBindings']: - _rb = binding['roleBinding'] - if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ - _rb['userNames'] is not None and \ - self.config.config_options['user']['value'] in _rb['userNames']: + for binding in bindings: + if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \ + binding['userNames'] is not None and \ + self.config.config_options['user']['value'] in binding['userNames']: self.role_binding = binding return True diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py index ae6795446..328e5cb67 100644 --- a/roles/lib_openshift/src/class/oc_clusterrole.py +++ b/roles/lib_openshift/src/class/oc_clusterrole.py @@ -56,7 +56,7 @@ class OCClusterRole(OpenShiftCLI): self.clusterrole = ClusterRole(content=result['results'][0]) result['results'] = self.clusterrole.yaml_dict - elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']: + elif '"{}" not found'.format(self.name) in result['stderr']: result['returncode'] = 0 self.clusterrole = None diff --git a/roles/lib_openshift/src/class/oc_pvc.py b/roles/lib_openshift/src/class/oc_pvc.py index c73abc47c..6b566c301 100644 --- a/roles/lib_openshift/src/class/oc_pvc.py +++ b/roles/lib_openshift/src/class/oc_pvc.py @@ -85,6 +85,8 @@ class OCPVC(OpenShiftCLI): params['kubeconfig'], params['access_modes'], params['volume_capacity'], + params['selector'], + params['storage_class_name'], ) oc_pvc = OCPVC(pconfig, verbose=params['debug']) diff --git a/roles/lib_openshift/src/doc/pvc b/roles/lib_openshift/src/doc/pvc index 9240f2a0f..268ad0b94 100644 --- a/roles/lib_openshift/src/doc/pvc +++ b/roles/lib_openshift/src/doc/pvc @@ -59,6 +59,18 @@ options: - ReadOnlyMany - ReadWriteMany aliases: [] + storage_class_name: + description: + - The storage class name for the PVC + required: false + default: None + aliases: [] + selector: + description: + - A hash of key/values for the matchLabels + required: false + default: None + aliases: [] author: - "Kenny Woodson <kwoodson@redhat.com>" extends_documentation_fragment: [] diff --git a/roles/lib_openshift/src/lib/pvc.py b/roles/lib_openshift/src/lib/pvc.py index 929b50990..d1e935c32 100644 --- a/roles/lib_openshift/src/lib/pvc.py +++ b/roles/lib_openshift/src/lib/pvc.py @@ -11,7 +11,9 @@ class PersistentVolumeClaimConfig(object): namespace, kubeconfig, access_modes=None, - vol_capacity='1G'): + vol_capacity='1G', + selector=None, + storage_class_name=None): ''' constructor for handling pvc options ''' self.kubeconfig = kubeconfig self.name = sname @@ -19,6 +21,8 @@ class PersistentVolumeClaimConfig(object): self.access_modes = access_modes self.vol_capacity = vol_capacity self.data = {} + self.selector = selector + self.storage_class_name = storage_class_name self.create_dict() @@ -36,12 +40,16 @@ class PersistentVolumeClaimConfig(object): self.data['spec']['accessModes'] = ['ReadWriteOnce'] if self.access_modes: self.data['spec']['accessModes'] = self.access_modes + if self.selector: + self.data['spec']['selector'] = {'matchLabels': self.selector} # storage capacity self.data['spec']['resources'] = {} self.data['spec']['resources']['requests'] = {} self.data['spec']['resources']['requests']['storage'] = self.vol_capacity + if self.storage_class_name: + 
self.data['spec']['storageClassName'] = self.storage_class_name # pylint: disable=too-many-instance-attributes,too-many-public-methods class PersistentVolumeClaim(Yedit): @@ -51,13 +59,29 @@ class PersistentVolumeClaim(Yedit): volume_name_path = "spec.volumeName" bound_path = "status.phase" kind = 'PersistentVolumeClaim' + selector_path = "spec.selector.matchLabels" + storage_class_name_path = "spec.storageClassName" def __init__(self, content): - '''RoleBinding constructor''' + '''PersistentVolumeClaim constructor''' super(PersistentVolumeClaim, self).__init__(content=content) self._access_modes = None self._volume_capacity = None self._volume_name = None + self._selector = None + self._storage_class_name = None + + @property + def storage_class_name(self): + ''' storage_class_name property ''' + if self._storage_class_name is None: + self._storage_class_name = self.get_storage_class_name() + return self._storage_class_name + + @storage_class_name.setter + def storage_class_name(self, data): + ''' storage_class_name property setter''' + self._storage_class_name = data @property def volume_name(self): @@ -72,6 +96,24 @@ class PersistentVolumeClaim(Yedit): self._volume_name = data @property + def selector(self): + ''' selector property ''' + if self._selector is None: + self._selector = self.get_selector() + if not isinstance(self._selector, dict): + self._selector = dict(self._selector) + + return self._selector + + @selector.setter + def selector(self, data): + ''' selector property setter''' + if not isinstance(data, dict): + data = dict(data) + + self._selector = data + + @property def access_modes(self): ''' access_modes property ''' if self._access_modes is None: @@ -101,6 +143,14 @@ class PersistentVolumeClaim(Yedit): ''' volume_capacity property setter''' self._volume_capacity = data + def get_storage_class_name(self): + '''get storage_class_name''' + return self.get(PersistentVolumeClaim.storage_class_name_path) or [] + + def get_selector(self): + '''get selector''' + return self.get(PersistentVolumeClaim.selector_path) or [] + def get_access_modes(self): '''get access_modes''' return self.get(PersistentVolumeClaim.access_modes_path) or [] diff --git a/roles/lib_openshift/src/lib/storageclass.py b/roles/lib_openshift/src/lib/storageclass.py index ef12a8d2d..c49a3066a 100644 --- a/roles/lib_openshift/src/lib/storageclass.py +++ b/roles/lib_openshift/src/lib/storageclass.py @@ -8,7 +8,7 @@ class StorageClassConfig(object): # pylint: disable=too-many-arguments def __init__(self, name, - provisioner=None, + provisioner, parameters=None, annotations=None, default_storage_class="false", @@ -40,10 +40,7 @@ class StorageClassConfig(object): self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \ self.default_storage_class - if self.provisioner is None: - self.data['provisioner'] = 'kubernetes.io/aws-ebs' - else: - self.data['provisioner'] = self.provisioner + self.data['provisioner'] = self.provisioner self.data['parameters'] = {} if self.parameters is not None: diff --git a/roles/lib_openshift/src/test/integration/oc_pvc.yml b/roles/lib_openshift/src/test/integration/oc_pvc.yml new file mode 100755 index 000000000..fb3a4781f --- /dev/null +++ b/roles/lib_openshift/src/test/integration/oc_pvc.yml @@ -0,0 +1,28 @@ +#!/usr/bin/ansible-playbook --module-path=../../../library/ +# ./oc_pvc.yml -e "cli_master_test=$OPENSHIFT_MASTER +--- +- hosts: "{{ cli_master_test }}" + gather_facts: no + user: root + tasks: + - name: create pvc + oc_pvc: + state: present + 
name: oc-pvc-create-test + namespace: default + volume_capacity: 3G + access_modes: + - ReadWriteOnce + selector: + foo: bar + storage_class_name: my-storage-class-name + register: pvcout + - debug: var=pvcout + + - assert: + that: + - pvcout.results.results[0]['metadata']['name'] == 'oc-pvc-create-test' + - pvcout.results.results[0]['spec']['storageClassName'] == 'my-storage-class-name' + - pvcout.results.results[0]['spec']['selector']['matchLabels']['foo'] == 'bar' + - pvcout.changed + msg: pvc create failed. diff --git a/roles/lib_openshift/src/test/unit/test_oc_pvc.py b/roles/lib_openshift/src/test/unit/test_oc_pvc.py index 82187917d..a96f2e4a7 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_pvc.py +++ b/roles/lib_openshift/src/test/unit/test_oc_pvc.py @@ -30,6 +30,8 @@ class OCPVCTest(unittest.TestCase): 'name': 'mypvc', 'namespace': 'test', 'volume_capacity': '1G', + 'selector': {'foo': 'bar', 'abc': 'a123'}, + 'storage_class_name': 'mystorage', 'access_modes': 'ReadWriteMany'} @mock.patch('oc_pvc.Utils.create_tmpfile_copy') @@ -65,6 +67,13 @@ class OCPVCTest(unittest.TestCase): "storage": "1Gi" } }, + "selector": { + "matchLabels": { + "foo": "bar", + "abc": "a123" + } + }, + "storageClassName": "myStorage", "volumeName": "pv-aws-ow5vl" }, "status": { @@ -93,6 +102,8 @@ class OCPVCTest(unittest.TestCase): self.assertTrue(results['changed']) self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mypvc') + self.assertEqual(results['results']['results'][0]['spec']['storageClassName'], 'myStorage') + self.assertEqual(results['results']['results'][0]['spec']['selector']['matchLabels']['foo'], 'bar') @mock.patch('oc_pvc.Utils.create_tmpfile_copy') @mock.patch('oc_pvc.OCPVC._run') diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 49cc51b48..42c4945b4 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -2222,14 +2222,10 @@ class OpenShiftFacts(object): product_version = self.system_facts['ansible_product_version'] virt_type = self.system_facts['ansible_virtualization_type'] virt_role = self.system_facts['ansible_virtualization_role'] + bios_vendor = self.system_facts['ansible_system_vendor'] provider = None metadata = None - # TODO: this is not exposed through module_utils/facts.py in ansible, - # need to create PR for ansible to expose it - bios_vendor = get_file_content( # noqa: F405 - '/sys/devices/virtual/dmi/id/bios_vendor' - ) if bios_vendor == 'Google': provider = 'gce' metadata_url = ('http://metadata.google.internal/' diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py index 581dd7d15..23da53940 100644 --- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py +++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py @@ -13,6 +13,7 @@ except ImportError: display = Display() from ansible.plugins.action import ActionBase +from ansible.module_utils.six import string_types # Augment sys.path so that we can import checks from a directory relative to # this callback plugin. 
@@ -39,7 +40,8 @@ class ActionModule(ActionBase): try: known_checks = self.load_known_checks(tmp, task_vars) args = self._task.args - resolved_checks = resolve_checks(args.get("checks", []), known_checks.values()) + requested_checks = normalize(args.get('checks', [])) + resolved_checks = resolve_checks(requested_checks, known_checks.values()) except OpenShiftCheckException as e: result["failed"] = True result["msg"] = str(e) @@ -47,10 +49,7 @@ class ActionModule(ActionBase): result["checks"] = check_results = {} - user_disabled_checks = [ - check.strip() - for check in task_vars.get("openshift_disable_check", "").split(",") - ] + user_disabled_checks = normalize(task_vars.get('openshift_disable_check', [])) for check_name in resolved_checks: display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"])) @@ -134,3 +133,14 @@ def resolve_checks(names, all_checks): resolved.update(tag_to_checks[tag]) return resolved + + +def normalize(checks): + """Return a clean list of check names. + + The input may be a comma-separated string or a sequence. Leading and + trailing whitespace characters are removed. Empty items are discarded. + """ + if isinstance(checks, string_types): + checks = checks.split(',') + return [name.strip() for name in checks if name.strip()] diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py index a48e1c728..43ba6c406 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/logging.py +++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py @@ -11,6 +11,9 @@ from openshift_checks import OpenShiftCheck, OpenShiftCheckException class LoggingCheck(OpenShiftCheck): """Base class for OpenShift aggregated logging component checks""" + # FIXME: this should not be listed as a check, since it is not meant to be + # run by itself. + name = "logging" logging_namespace = "logging" @@ -27,7 +30,7 @@ class LoggingCheck(OpenShiftCheck): return masters[0] == hostname def run(self): - pass + return {} def get_pods_for_component(self, namespace, logging_component): """Get all pods for a given component. Returns: list of pods for component, error string""" diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index d2ef7cc71..97650e2ce 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -57,6 +57,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all']. - `openshift_logging_fluentd_buffer_queue_limit`: Buffer queue limit for Fluentd. Defaults to 1024. - `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m. +- `openshift_logging_fluentd_file_buffer_limit`: Fluentd will set the value to the file buffer limit. Defaults to '1Gi' per destination. - `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'. @@ -160,3 +161,18 @@ Elasticsearch OPS too, if using an OPS cluster: need to set this - `openshift_logging_mux_buffer_queue_limit`: Default `[1024]` - Buffer queue limit for Mux. - `openshift_logging_mux_buffer_size_limit`: Default `[1m]` - Buffer chunk limit for Mux. +- `openshift_logging_mux_file_buffer_limit`: Default `[2Gi]` per destination - Mux will + set the value to the file buffer limit. 
+- `openshift_logging_mux_file_buffer_storage_type`: Default `[emptydir]` - Storage + type for the file buffer. One of [`emptydir`, `pvc`, `hostmount`] + +- `openshift_logging_mux_file_buffer_pvc_size`: The requested size for the file buffer + PVC, when not provided the role will not generate any PVCs. Defaults to `4Gi`. +- `openshift_logging_mux_file_buffer_pvc_dynamic`: Whether or not to add the dynamic + PVC annotation for any generated PVCs. Defaults to 'False'. +- `openshift_logging_mux_file_buffer_pvc_pv_selector`: A key/value map added to a PVC + in order to select specific PVs. Defaults to 'None'. +- `openshift_logging_mux_file_buffer_pvc_prefix`: The prefix for the generated PVCs. + Defaults to 'logging-mux'. +- `openshift_logging_mux_file_buffer_storage_group`: The storage group used for Mux. + Defaults to '65534'. diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index ae7e48caa..3113fb3c9 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -91,7 +91,7 @@ es_port: "{{ openshift_logging_curator_es_port }}" curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}" curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}" - replicas: "{{ openshift_logging_curator_replicas | default (1) }}" + curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}" curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}" check_mode: no changed_when: no diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2 index 1bf9b9de2..6431f86d9 100644 --- a/roles/openshift_logging_curator/templates/curator.j2 +++ b/roles/openshift_logging_curator/templates/curator.j2 @@ -7,7 +7,7 @@ metadata: component: "{{component}}" logging-infra: "{{logging_component}}" spec: - replicas: {{replicas|default(1)}} + replicas: {{curator_replicas|default(1)}} selector: provider: openshift component: "{{component}}" diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 532f4a85d..0548e3c40 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -257,7 +257,7 @@ es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}" es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" - replicas: 1 + es_replicas: 1 - name: Set ES dc oc_obj: diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 7424db6f6..cbe6b89f2 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -8,7 +8,7 @@ metadata: deployment: "{{deploy_name}}" logging-infra: "{{logging_component}}" spec: - replicas: {{replicas|default(1)}} + replicas: {{es_replicas|default(1)}} selector: provider: openshift component: "{{component}}" @@ -78,7 +78,7 @@ spec: name: "INSTANCE_RAM" value: "{{openshift_logging_elasticsearch_memory_limit}}" - - name: "HEAP_DUMP_LOCATION" + name: "HEAP_DUMP_LOCATION" value: "/elasticsearch/persistent/heapdump.hprof" - name: "NODE_QUORUM" diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index ce7cfc433..be9943b0d 100644 --- 
a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -57,3 +57,5 @@ openshift_logging_fluentd_es_copy: false #fluentd_config_contents: #fluentd_throttle_contents: #fluentd_secureforward_contents: + +openshift_logging_fluentd_file_buffer_limit: 1Gi diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index 970e5c2a5..a4cf9a149 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -62,6 +62,8 @@ spec: - name: dockerdaemoncfg mountPath: /etc/docker readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd {% if openshift_logging_use_mux_client | bool %} - name: muxcerts mountPath: /etc/fluent/muxkeys @@ -112,6 +114,8 @@ spec: resource: limits.memory - name: "USE_MUX_CLIENT" value: "{{ openshift_logging_use_mux_client | default('false') | lower }}" + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}" volumes: - name: runlogjournal hostPath: @@ -145,3 +149,6 @@ spec: secret: secretName: logging-mux {% endif %} + - name: filebufferstorage + hostPath: + path: "/var/lib/fluentd" diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 93cb82793..62bc26e37 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -233,7 +233,7 @@ kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}" kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}" kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}" - replicas: "{{ openshift_logging_kibana_replicas | default (1) }}" + kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}" kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}" - name: Set Kibana DC diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 index f8043812b..512d99d06 100644 --- a/roles/openshift_logging_kibana/templates/kibana.j2 +++ b/roles/openshift_logging_kibana/templates/kibana.j2 @@ -7,7 +7,7 @@ metadata: component: "{{ component }}" logging-infra: "{{ logging_component }}" spec: - replicas: {{ replicas | default(1) }} + replicas: {{ kibana_replicas | default(1) }} selector: provider: openshift component: "{{ component }}" diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index 797a27c1b..35fc7146f 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -47,3 +47,20 @@ openshift_logging_mux_ops_ca: /etc/fluent/keys/ca #mux_config_contents: #mux_throttle_contents: #mux_secureforward_contents: + +# One of ['emptydir', 'pvc', 'hostmount'] +openshift_logging_mux_file_buffer_storage_type: "emptydir" + +# pvc options +# the name of the PVC we will bind to -- create it if it does not exist +openshift_logging_mux_file_buffer_pvc_name: "logging-mux-pvc" + +# required if the PVC does not already exist +openshift_logging_mux_file_buffer_pvc_size: 4Gi +openshift_logging_mux_file_buffer_pvc_dynamic: false +openshift_logging_mux_file_buffer_pvc_pv_selector: {} +openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce'] +openshift_logging_mux_file_buffer_storage_group: '65534' + +openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux" 
+openshift_logging_mux_file_buffer_limit: 2Gi diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 54af40070..8ec93de7d 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -172,11 +172,23 @@ ops_port: "{{ openshift_logging_mux_ops_port }}" mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}" mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}" - replicas: "{{ openshift_logging_mux_replicas | default(1) }}" + mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}" mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}" check_mode: no changed_when: no +- name: Create Mux PVC + oc_pvc: + state: present + name: "{{ openshift_logging_mux_file_buffer_pvc_name }}" + namespace: "{{ openshift_logging_mux_namespace }}" + volume_capacity: "{{ openshift_logging_mux_file_buffer_pvc_size }}" + access_modes: "{{ openshift_logging_mux_file_buffer_pvc_access_modes | list }}" + selector: "{{ openshift_logging_mux_file_buffer_pvc_pv_selector }}" + storage_class_name: "{{ openshift_logging_mux_file_buffer_pvc_storage_class_name | default('', true) }}" + when: + - openshift_logging_mux_file_buffer_storage_type == "pvc" + - name: Set logging-mux DC oc_obj: state: present diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2 index 226294847..e43d9d397 100644 --- a/roles/openshift_logging_mux/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/mux.j2 @@ -7,7 +7,7 @@ metadata: component: "{{component}}" logging-infra: "{{logging_component}}" spec: - replicas: {{replicas|default(1)}} + replicas: {{mux_replicas|default(1)}} selector: provider: openshift component: "{{component}}" @@ -66,6 +66,8 @@ spec: - name: muxcerts mountPath: /etc/fluent/muxkeys readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd env: - name: "K8S_HOST_URL" value: "{{openshift_logging_mux_master_url}}" @@ -115,6 +117,8 @@ spec: resourceFieldRef: containerName: "mux" resource: limits.memory + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}" volumes: - name: config configMap: @@ -131,3 +135,13 @@ spec: - name: muxcerts secret: secretName: logging-mux + - name: filebufferstorage +{% if openshift_logging_mux_file_buffer_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_logging_mux_file_buffer_pvc_name }} +{% elif openshift_logging_mux_file_buffer_storage_type == 'hostmount' %} + hostPath: + path: "/var/log/fluentd" +{% else %} + emptydir: {} +{% endif %} diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 0c4ee319c..1f182a25c 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -133,12 +133,12 @@ - block: - name: check whether our docker-registry setting exists in the env file command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master" - ignore_errors: true + failed_when: false changed_when: false register: already_set - set_fact: - openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" + openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout is defined and 
already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" - name: Set fact of all etcd host IPs openshift_facts: diff --git a/roles/openshift_metrics/tasks/generate_rolebindings.yaml b/roles/openshift_metrics/tasks/generate_rolebindings.yaml index 1304ab8b5..9882b1eb5 100644 --- a/roles/openshift_metrics/tasks/generate_rolebindings.yaml +++ b/roles/openshift_metrics/tasks/generate_rolebindings.yaml @@ -37,3 +37,12 @@ src: hawkular_metrics_role.j2 dest: "{{ mktemp.stdout }}/templates/hawkular-cluster-role.yaml" changed_when: no + +- name: Set hawkular cluster roles + oc_obj: + name: hawkular-metrics + namespace: "{{ openshift_metrics_hawkular_agent_namespace }}" + kind: clusterrole + files: + - "{{ mktemp.stdout }}/templates/hawkular-cluster-role.yaml" + delete_after: true diff --git a/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml b/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml index e9d70f74f..db27680fe 100644 --- a/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml +++ b/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml @@ -13,3 +13,15 @@ - name: cassandra secret: hawkular-cassandra-secrets changed_when: no + +- name: Set serviceaccounts for hawkular metrics/cassandra + oc_obj: + name: "{{ item }}" + kind: serviceaccount + namespace: "{{ openshift_metrics_hawkular_agent_namespace }}" + files: + - "{{ mktemp.stdout }}/templates/metrics-{{ item }}-sa.yaml" + delete_after: true + with_items: + - hawkular + - cassandra diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2 index 08ca87288..423ab54a3 100644 --- a/roles/openshift_metrics/templates/route.j2 +++ b/roles/openshift_metrics/templates/route.j2 @@ -17,7 +17,7 @@ spec: tls: termination: {{ tls.termination }} {% if tls.ca_certificate is defined and tls.ca_certificate | length > 0 %} - CACertificate: | + caCertificate: | {{ tls.ca_certificate|indent(6, true) }} {% endif %} {% if tls.key is defined and tls.key | length > 0 %} diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 5904ca9bc..47073ee0f 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -8,7 +8,7 @@ os_firewall_allow: port: 443/tcp - service: OpenShift OVS sdn port: 4789/udp - when: openshift.common.use_openshift_sdn | bool + when: openshift.common.use_openshift_sdn | default(true) | bool - service: Calico BGP Port port: 179/tcp when: openshift.common.use_calico | bool diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 6b38da7f8..f2c45a4bd 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -3,7 +3,7 @@ systemd: name: openvswitch state: restarted - when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool + when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool register: l_openshift_node_stop_openvswitch_result until: not l_openshift_node_stop_openvswitch_result | failed retries: 3 diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 3b7e8126a..e19d82ddc 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -30,7 +30,7 @@ dependencies: os_firewall_allow: - 
service: OpenShift OVS sdn port: 4789/udp - when: openshift.common.use_openshift_sdn | bool + when: openshift.common.use_openshift_sdn | default(true) | bool - role: os_firewall os_firewall_allow: - service: Calico BGP Port diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 0133533fc..8b4931e7c 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -90,7 +90,9 @@ package: name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present - when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool + when: + - openshift.common.use_openshift_sdn | default(true) | bool + - not openshift.common.is_containerized | bool - name: Install conntrack-tools package package: @@ -119,7 +121,9 @@ enabled: yes state: started daemon_reload: yes - when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool + when: + - openshift.common.is_containerized | bool + - openshift.common.use_openshift_sdn | default(true) | bool register: ovs_start_result until: not ovs_start_result | failed retries: 3 diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh index 924226d09..4aab8f2e9 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh @@ -37,6 +37,8 @@ if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then UPSTREAM_DNS_TMP_SORTED=`mktemp` CURRENT_UPSTREAM_DNS_SORTED=`mktemp` NEW_RESOLV_CONF=`mktemp` + NEW_NODE_RESOLV_CONF=`mktemp` + ###################################################################### # couldn't find an existing method to determine if the interface owns the @@ -60,12 +62,14 @@ EOF fi ###################################################################### - # Generate a new origin dns config file + # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf + # and /etc/origin/node/resolv.conf in their respective formats for ns in ${IP4_NAMESERVERS}; do if [[ ! -z $ns ]]; then - echo "server=${ns}" + echo "server=${ns}" >> $UPSTREAM_DNS_TMP + echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF fi - done > $UPSTREAM_DNS_TMP + done # Sort it in case DNS servers arrived in a different order sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED @@ -74,7 +78,6 @@ EOF # Compare to the current config file (sorted) NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'` CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'` - if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then # DNS has changed, copy the temp file to the proper location (-Z # sets default selinux context) and set the restart flag @@ -82,6 +85,13 @@ EOF NEEDS_RESTART=1 fi + # compare /etc/origin/node/resolv.conf checksum and replace it if different + NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF}` + OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf` + if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then + cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf + fi + if ! 
`systemctl -q is-active dnsmasq.service`; then NEEDS_RESTART=1 fi @@ -91,17 +101,14 @@ EOF systemctl restart dnsmasq fi - # Only if dnsmasq is running properly make it our only nameserver, copy - # original resolv.conf to /etc/origin/node/resolv.conf for node service to - # bypass dnsmasq + # Only if dnsmasq is running properly make it our only nameserver and place + # a watermark on /etc/resolv.conf if `systemctl -q is-active dnsmasq.service`; then - if ! grep -q '99-origin-dns.sh' ${NEW_RESOLV_CONF}; then + if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF} - cp /etc/resolv.conf /etc/origin/node/resolv.conf fi - sed -e '/^nameserver.*$/d' /etc/resolv.conf > ${NEW_RESOLV_CONF} + sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF} echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF} - if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF} fi diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml index 110dfe5ce..d31b899cf 100644 --- a/roles/openshift_node_upgrade/handlers/main.yml +++ b/roles/openshift_node_upgrade/handlers/main.yml @@ -3,7 +3,10 @@ systemd: name: openvswitch state: restarted - when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool + when: + - not skip_node_svc_handlers | default(False) | bool + - not (ovs_service_status_changed | default(false) | bool) + - openshift.common.use_openshift_sdn | default(true) | bool register: l_openshift_node_upgrade_stop_openvswitch_result until: not l_openshift_node_upgrade_stop_openvswitch_result | failed retries: 3 @@ -26,3 +29,8 @@ when: - (not skip_node_svc_handlers | default(False) | bool) - not (node_service_status_changed | default(false) | bool) + +# TODO(jchaloup): once it is verified the systemd module works as expected +# switch to it: http://docs.ansible.com/ansible/latest/systemd_module.html +- name: reload systemd units + command: systemctl daemon-reload diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index ac9ea32cb..f984a04b2 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -43,7 +43,9 @@ docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool + when: + - openshift.common.is_containerized | bool + - openshift.common.use_openshift_sdn | default(true) | bool - include: docker/upgrade.yml vars: diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml index e8f017445..9b3805eea 100644 --- a/roles/openshift_node_upgrade/tasks/systemd_units.yml +++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml @@ -22,23 +22,27 @@ template: dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service" src: openshift.docker.node.dep.service - register: install_node_dep_result when: openshift.common.is_containerized | bool + notify: + - reload systemd units + - restart node - name: Install Node docker service file template: dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" src: 
openshift.docker.node.service - register: install_node_result when: openshift.common.is_containerized | bool + notify: + - reload systemd units + - restart node - name: Create the openvswitch service env file template: src: openvswitch.sysconfig.j2 dest: /etc/sysconfig/openvswitch when: openshift.common.is_containerized | bool - register: install_ovs_sysconfig notify: + - reload systemd units - restart openvswitch # May be a temporary workaround. @@ -52,8 +56,8 @@ dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf" src: openvswitch-avoid-oom.conf when: openshift.common.use_openshift_sdn | default(true) | bool - register: install_oom_fix_result notify: + - reload systemd units - restart openvswitch - name: Install OpenvSwitch docker service file @@ -62,6 +66,7 @@ src: openvswitch.docker.service when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool notify: + - reload systemd units - restart openvswitch - name: Configure Node settings @@ -96,9 +101,3 @@ when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '') notify: - restart node - -- name: Reload systemd units - command: systemctl daemon-reload - when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed - notify: - - restart node diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index a846889ca..8661f33a1 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -1,6 +1,6 @@ --- openshift_storage_glusterfs_timeout: 300 -openshift_storage_glusterfs_namespace: 'glusterfs' +openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if glusterfs_is_native or glusterfs_heketi_is_native else 'default' | quote }}" openshift_storage_glusterfs_is_native: True openshift_storage_glusterfs_name: 'storage' openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host" |