22 files changed, 1579 insertions, 239 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index a9b7ba843..087da58c4 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.61-1 ./
+3.0.65-1 ./
diff --git a/README_libvirt.md b/README_libvirt.md
index 8b46252b3..5c72eb64f 100644
--- a/README_libvirt.md
+++ b/README_libvirt.md
@@ -10,7 +10,7 @@ This makes `libvirt` useful to develop, test and debug OpenShift and openshift-a
 
 Install dependencies
 --------------------
-1. Install [ansible](http://www.ansible.com/) 
+1. Install [ansible](http://www.ansible.com/)
 2. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
 3. Install [ebtables](http://ebtables.netfilter.org/)
 4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
@@ -121,6 +121,11 @@ The following options can be passed via the `-o` flag of the `create` command or
 * `image_name` (default to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on
 * `image_compression` (default to `xz`): Source QCOW2 compression (only xz supported at this time)
 * `image_sha256` (default to `dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471`): Expected SHA256 checksum of the downloaded image
+* `libvirt_storage_pool` (default to `openshift-ansible`): name of the libvirt storage pool for the VM images. It will be created if it does not exist
+* `libvirt_storage_pool_path` (default to `$HOME/libvirt-storage-pool-openshift-ansible`): path to `libvirt_storage_pool`, i.e. where the VM images are stored
+* `libvirt_network` (default to `openshift-ansible`): name of the libvirt network that the VMs will use. It will be created if it does not exist
+* `libvirt_instance_memory_mib` (default to `1024`): memory of the VMs in MiB
+* `libvirt_instance_vcpu` (default to `2`): number of vCPUs of the VMs
 * `skip_image_download` (default to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible`
 
 Creating a cluster
diff --git a/inventory/multi_inventory.py b/inventory/multi_inventory.py
index 57be259f7..be597267e 100755
--- a/inventory/multi_inventory.py
+++ b/inventory/multi_inventory.py
@@ -316,17 +316,29 @@ class MultiInventory(object):
                 inventory[key].append(name)
 
     def apply_group_selectors(self, inventory, group_selectors):
-        ''' Apply the account config for clone groups '''
+        ''' Apply the account config for group selectors '''
         _ = self # Here for pylint. wanted an instance method instead of static
-        for selector in group_selectors:
-            if inventory.has_key(selector['from_group']):
-                inventory[selector['from_group']].sort()
-                inventory[selector['name']] = inventory[selector['from_group']][0:selector['count']]
-                for host in inventory[selector['from_group']]:
-                    if host in inventory[selector['name']]:
-                        inventory['_meta']['hostvars'][host][selector['name']] = True
+        # There could be multiple clusters per account. We need to process these selectors
+        # based upon the oo_clusterid_ variable.
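For context, here is a minimal sketch of the account-config entry these selectors consume. The field names (`from_group`, `name`, `count`) are the ones the code reads; the group names and the count are hypothetical:

    group_selectors:
    - from_group: tag_host-type_master   # existing inventory group to select from (hypothetical)
      name: oo_master_primary            # group to create or extend with the selection (hypothetical)
      count: 1                           # hosts to take per oo_clusterid_* group

With multiple clusters in one account, the rewritten loop below intersects each `oo_clusterid_*` group with `from_group`, sorts the result, takes the first `count` hosts, and extends the target group rather than overwriting it.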
+ clusterids = [group for group in inventory if "oo_clusterid_" in group] + + for clusterid in clusterids: + for selector in group_selectors: + if inventory.has_key(selector['from_group']): + hosts = list(set(inventory[clusterid]) & set(inventory[selector['from_group']])) + hosts.sort() + + # Multiple clusters in an account + if inventory.has_key(selector['name']): + inventory[selector['name']].extend(hosts[0:selector['count']]) else: - inventory['_meta']['hostvars'][host][selector['name']] = False + inventory[selector['name']] = hosts[0:selector['count']] + + for host in hosts: + if host in inventory[selector['name']]: + inventory['_meta']['hostvars'][host][selector['name']] = True + else: + inventory['_meta']['hostvars'][host][selector['name']] = False def apply_account_config(self, acc_config): ''' Apply account config settings ''' diff --git a/openshift-ansible.spec b/openshift-ansible.spec index f01e13f6e..fa9940225 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.61 +Version: 3.0.65 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -279,6 +279,28 @@ Atomic OpenShift Utilities includes %changelog +* Thu Mar 24 2016 Troy Dawson <tdawson@redhat.com> 3.0.65-1 +- Adding deployment config and refactored. (kwoodson@redhat.com) +- ManageIQ SA: Adding image-puller role (efreiber@redhat.com) + +* Wed Mar 23 2016 Troy Dawson <tdawson@redhat.com> 3.0.64-1 +- Latest cli updates from generated files (kwoodson@redhat.com) +- Add /dev to node containers (sdodson@redhat.com) +- Fix indention (whearn@redhat.com) +- Support setting local storage perFSGroup quota in node config. + (dgoodwin@redhat.com) +- Fix line break (whearn@redhat.com) +- Lock down permissions on named certificates (elyscape@gmail.com) +- Add namespace flag to oc create (whearn@redhat.com) + +* Mon Mar 21 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.63-1 +- Modified group selectors for muliple clusters per account + (kwoodson@redhat.com) + +* Fri Mar 18 2016 Troy Dawson <tdawson@redhat.com> 3.0.62-1 +- Yaml editor first attempt (kwoodson@redhat.com) +- libvirt cluster variables cleanup (pep@redhat.com) + * Thu Mar 17 2016 Troy Dawson <tdawson@redhat.com> 3.0.61-1 - Bug 1317755 - Set insecure-registry for internal registry by default (jdetiber@redhat.com) diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 972427c53..f1eaf8e16 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -311,13 +311,14 @@ file: path: "{{ named_certs_dir }}" state: directory + mode: 0700 when: named_certs_specified | bool - name: Land named certificates copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}" with_items: openshift_master_named_certificates when: named_certs_specified | bool - name: Land named certificate keys - copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}" + copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}" mode=0600 with_items: openshift_master_named_certificates when: named_certs_specified | bool diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 30333f7be..701d57d26 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -7,11 +7,6 @@ vars_files: - vars.yml vars: - os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}" - 
os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}" - os_libvirt_network: "{{ libvirt_network | default('default') }}" - os_libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024) }}" - os_libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2) }}" image_url: "{{ deployment_vars[deployment_type].image.url }}" image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}" image_name: "{{ deployment_vars[deployment_type].image.name }}" diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index d77b80c62..937a765fa 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -13,58 +13,58 @@ get_url: url: '{{ image_url }}' sha256sum: '{{ image_sha256 }}' - dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}' + dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}' when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}' register: downloaded_image - name: Uncompress xz compressed base cloud image - command: 'unxz -kf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' + command: 'unxz -kf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' args: - creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' when: image_compression in ["xz"] and downloaded_image.changed - name: Uncompress tgz compressed base cloud image - command: 'tar zxvf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' + command: 'tar zxvf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' args: - creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' when: image_compression in ["tgz"] and downloaded_image.changed - name: Uncompress gzip compressed base cloud image - command: 'gunzip {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' + command: 'gunzip {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' args: - creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' when: image_compression in ["gz"] and downloaded_image.changed - name: Create the cloud-init config drive path file: - dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/' + dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' state: directory with_items: instances - name: Create the cloud-init config drive files template: src: '{{ item[1] }}' - dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}' + dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}' with_nested: - instances - [ user-data, meta-data ] - name: Create the cloud-init config drive - command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' + command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' args: - 
chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/' - creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' + chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' + creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' with_items: instances - name: Refresh the libvirt storage pool for openshift command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' - name: Create VM drives - command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2' + command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2' with_items: instances - name: Create VM docker drives - command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0' + command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0' with_items: instances - name: Create VMs diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml index 56f450642..8e96cec8d 100644 --- a/playbooks/libvirt/openshift-cluster/templates/domain.xml +++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml @@ -1,6 +1,6 @@ <domain type='kvm' id='8'> <name>{{ item }}</name> - <memory unit='MiB'>{{ os_libvirt_instance_memory_mib }}</memory> + <memory unit='MiB'>{{ libvirt_instance_memory_mib }}</memory> <metadata xmlns:ansible="https://github.com/ansible/ansible"> <ansible:tags> <ansible:tag>environment-{{ cluster_env }}</ansible:tag> @@ -9,7 +9,7 @@ <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag> </ansible:tags> </metadata> - <vcpu placement='static'>{{ os_libvirt_instance_vcpu }}</vcpu> + <vcpu placement='static'>{{ libvirt_instance_vcpu }}</vcpu> <os> <type arch='x86_64' machine='pc'>hvm</type> <boot dev='hd'/> @@ -31,23 +31,23 @@ <emulator>/usr/bin/qemu-system-x86_64</emulator> <disk type='file' device='disk'> <driver name='qemu' type='qcow2'/> - <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/> + <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/> <target dev='vda' bus='virtio'/> </disk> <disk type='file' device='disk'> <driver name='qemu' type='qcow2'/> - <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/> + <source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/> <target dev='vdb' bus='virtio'/> </disk> <disk type='file' device='cdrom'> <driver name='qemu' type='raw'/> - <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/> + <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/> <target dev='vdc' bus='virtio'/> <readonly/> </disk> <controller type='usb' index='0' /> <interface type='network'> - <source network='{{ os_libvirt_network }}'/> + <source network='{{ libvirt_network }}'/> <model type='virtio'/> </interface> <serial type='pty'> diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index f28245f88..aa0c69e08 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -1,8 +1,11 @@ --- -libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible" 
-libvirt_storage_pool: 'openshift-ansible' -libvirt_network: openshift-ansible -libvirt_uri: 'qemu:///system' +default_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible" +libvirt_storage_pool_path: "{{ lookup('oo_option', 'libvirt_storage_pool_path') | default(default_pool_path, True) }}" +libvirt_storage_pool: "{{ lookup('oo_option', 'libvirt_storage_pool') | default('openshift-ansible', True) }}" +libvirt_network: "{{ lookup('oo_option', 'libvirt_network') | default('openshift-ansible', True) }}" +libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024, True) }}" +libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2, True) }}" +libvirt_uri: "{{ lookup('oo_option', 'libvirt_uri') | default('qemu:///system', True) }}" debug_level: 2 # Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication. diff --git a/roles/lib_openshift_api/library/oc_deploymentconfig.py b/roles/lib_openshift_api/library/oc_deploymentconfig.py new file mode 100644 index 000000000..fbdaa8e9c --- /dev/null +++ b/roles/lib_openshift_api/library/oc_deploymentconfig.py @@ -0,0 +1,377 @@ +#!/usr/bin/env python +''' + OpenShiftCLI class that wraps the oc commands in a subprocess +''' +import atexit +import json +import os +import shutil +import subprocess +import yaml + +class OpenShiftCLI(object): + ''' Class to wrap the oc command line tools ''' + def __init__(self, + namespace, + kubeconfig='/etc/origin/master/admin.kubeconfig', + verbose=False): + ''' Constructor for OpenshiftOC ''' + self.namespace = namespace + self.verbose = verbose + self.kubeconfig = kubeconfig + + def replace(self, fname, force=False): + '''return all pods ''' + cmd = ['replace', '-f', fname] + if force: + cmd = ['replace', '--force', '-f', fname] + return self.oc_cmd(cmd) + + def create(self, fname): + '''return all pods ''' + return self.oc_cmd(['create', '-f', fname, '-n', self.namespace]) + + def delete(self, resource, rname): + '''return all pods ''' + return self.oc_cmd(['delete', resource, rname, '-n', self.namespace]) + + def get(self, resource, rname=None): + '''return a secret by name ''' + cmd = ['get', resource, '-o', 'json', '-n', self.namespace] + if rname: + cmd.append(rname) + + rval = self.oc_cmd(cmd, output=True) + + # Ensure results are retuned in an array + if rval.has_key('items'): + rval['results'] = rval['items'] + elif not isinstance(rval['results'], list): + rval['results'] = [rval['results']] + + return rval + + def oc_cmd(self, cmd, output=False): + '''Base command for oc ''' + #cmds = ['/usr/bin/oc', '--config', self.kubeconfig] + cmds = ['/usr/bin/oc'] + cmds.extend(cmd) + + results = '' + + if self.verbose: + print ' '.join(cmds) + + proc = subprocess.Popen(cmds, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env={'KUBECONFIG': self.kubeconfig}) + proc.wait() + if proc.returncode == 0: + if output: + try: + results = json.loads(proc.stdout.read()) + except ValueError as err: + if "No JSON object could be decoded" in err.message: + results = err.message + + if self.verbose: + print proc.stderr.read() + print results + print + + return {"returncode": proc.returncode, "results": results} + + return {"returncode": proc.returncode, + "stderr": proc.stderr.read(), + "stdout": proc.stdout.read(), + "results": {} + } + +class Utils(object): + ''' utilities for openshiftcli modules ''' + @staticmethod + def create_file(rname, data, ftype=None): + ''' 
create a file in tmp with name and contents''' + path = os.path.join('/tmp', rname) + with open(path, 'w') as fds: + if ftype == 'yaml': + fds.write(yaml.dump(data, default_flow_style=False)) + + elif ftype == 'json': + fds.write(json.dumps(data)) + else: + fds.write(data) + + # Register cleanup when module is done + atexit.register(Utils.cleanup, [path]) + return path + + @staticmethod + def create_files_from_contents(data): + '''Turn an array of dict: filename, content into a files array''' + files = [] + + for sfile in data: + path = Utils.create_file(sfile['path'], sfile['content']) + files.append(path) + + return files + + @staticmethod + def cleanup(files): + '''Clean up on exit ''' + for sfile in files: + if os.path.exists(sfile): + if os.path.isdir(sfile): + shutil.rmtree(sfile) + elif os.path.isfile(sfile): + os.remove(sfile) + + + @staticmethod + def exists(results, _name): + ''' Check to see if the results include the name ''' + if not results: + return False + + + if Utils.find_result(results, _name): + return True + + return False + + @staticmethod + def find_result(results, _name): + ''' Find the specified result by name''' + rval = None + for result in results: + if result.has_key('metadata') and result['metadata']['name'] == _name: + rval = result + break + + return rval + + @staticmethod + def get_resource_file(sfile, sfile_type='yaml'): + ''' return the service file ''' + contents = None + with open(sfile) as sfd: + contents = sfd.read() + + if sfile_type == 'yaml': + contents = yaml.load(contents) + elif sfile_type == 'json': + contents = json.loads(contents) + + return contents + + # Disabling too-many-branches. This is a yaml dictionary comparison function + # pylint: disable=too-many-branches,too-many-return-statements + @staticmethod + def check_def_equal(user_def, result_def, debug=False): + ''' Given a user defined definition, compare it with the results given back by our query. 
''' + + # Currently these values are autogenerated and we do not need to check them + skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace'] + + for key, value in result_def.items(): + if key in skip: + continue + + # Both are lists + if isinstance(value, list): + if not isinstance(user_def[key], list): + return False + + # lists should be identical + if value != user_def[key]: + return False + + # recurse on a dictionary + elif isinstance(value, dict): + if not isinstance(user_def[key], dict): + if debug: + print "dict returned false not instance of dict" + return False + + # before passing ensure keys match + api_values = set(value.keys()) - set(skip) + user_values = set(user_def[key].keys()) - set(skip) + if api_values != user_values: + if debug: + print api_values + print user_values + print "keys are not equal in dict" + return False + + result = Utils.check_def_equal(user_def[key], value, debug=debug) + if not result: + if debug: + print "dict returned false" + return False + + # Verify each key, value pair is the same + else: + if not user_def.has_key(key) or value != user_def[key]: + if debug: + print "value not equal; user_def does not have key" + print value + print user_def[key] + return False + + return True + +class DeploymentConfig(OpenShiftCLI): + ''' Class to wrap the oc command line tools + ''' + def __init__(self, + namespace, + dname=None, + kubeconfig='/etc/origin/master/admin.kubeconfig', + verbose=False): + ''' Constructor for OpenshiftOC ''' + super(DeploymentConfig, self).__init__(namespace, kubeconfig) + self.namespace = namespace + self.name = dname + self.kubeconfig = kubeconfig + self.verbose = verbose + + def get_dc(self): + '''return a deploymentconfig by name ''' + return self.get('dc', self.name) + + def delete_dc(self): + '''return all pods ''' + return self.delete('dc', self.name) + + def new_dc(self, dfile): + '''Create a deploymentconfig ''' + return self.create(dfile) + + def update_dc(self, dfile, force=False): + '''run update dc + + This receives a list of file names and takes the first filename and calls replace. 
+ ''' + return self.replace(dfile, force) + + +# pylint: disable=too-many-branches +def main(): + ''' + ansible oc module for deploymentconfig + ''' + + module = AnsibleModule( + argument_spec=dict( + kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), + state=dict(default='present', type='str', + choices=['present', 'absent', 'list']), + debug=dict(default=False, type='bool'), + namespace=dict(default='default', type='str'), + name=dict(default=None, type='str'), + deploymentconfig_file=dict(default=None, type='str'), + input_type=dict(default='yaml', choices=['yaml', 'json'], type='str'), + delete_after=dict(default=False, type='bool'), + content=dict(default=None, type='dict'), + force=dict(default=False, type='bool'), + ), + mutually_exclusive=[["contents", "deploymentconfig_file"]], + + supports_check_mode=True, + ) + occmd = DeploymentConfig(module.params['namespace'], + dname=module.params['name'], + kubeconfig=module.params['kubeconfig'], + verbose=module.params['debug']) + + state = module.params['state'] + + api_rval = occmd.get_dc() + + ##### + # Get + ##### + if state == 'list': + module.exit_json(changed=False, results=api_rval['results'], state="list") + + if not module.params['name']: + module.fail_json(msg='Please specify a name when state is absent|present.') + ######## + # Delete + ######## + if state == 'absent': + if not Utils.exists(api_rval['results'], module.params['name']): + module.exit_json(changed=False, state="absent") + + if module.check_mode: + module.exit_json(change=False, msg='Would have performed a delete.') + + api_rval = occmd.delete_dc() + module.exit_json(changed=True, results=api_rval, state="absent") + + + if state == 'present': + if module.params['deploymentconfig_file']: + dfile = module.params['deploymentconfig_file'] + elif module.params['content']: + dfile = Utils.create_file('dc', module.params['content']) + else: + module.fail_json(msg="Please specify content or deploymentconfig file.") + + ######## + # Create + ######## + if not Utils.exists(api_rval['results'], module.params['name']): + + if module.check_mode: + module.exit_json(change=False, msg='Would have performed a create.') + + api_rval = occmd.new_dc(dfile) + + # Remove files + if module.params['deploymentconfig_file'] and module.params['delete_after']: + Utils.cleanup([dfile]) + + if api_rval['returncode'] != 0: + module.fail_json(msg=api_rval) + + module.exit_json(changed=True, results=api_rval, state="present") + + ######## + # Update + ######## + if Utils.check_def_equal(Utils.get_resource_file(dfile), api_rval['results'][0]): + + # Remove files + if module.params['deploymentconfig_file'] and module.params['delete_after']: + Utils.cleanup([dfile]) + + module.exit_json(changed=False, results=api_rval['results'], state="present") + + if module.check_mode: + module.exit_json(change=False, msg='Would have performed an update.') + + api_rval = occmd.update_dc(dfile, force=module.params['force']) + + # Remove files + if module.params['deploymentconfig_file'] and module.params['delete_after']: + Utils.cleanup([dfile]) + + if api_rval['returncode'] != 0: + module.fail_json(msg=api_rval) + + + module.exit_json(changed=True, results=api_rval, state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. 
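As a usage illustration only (this task is not in the repository; the name and file path are hypothetical), driving the new module with its documented parameters might look like:

    - name: ensure a deploymentconfig is present
      oc_deploymentconfig:
        state: present
        namespace: default
        name: router                               # hypothetical dc name
        deploymentconfig_file: /tmp/router-dc.yml  # hypothetical path, pre-placed on the host
        delete_after: True                         # clean up the file once applied

Per the argument_spec above, `state=list` returns the existing object, `state=absent` deletes it, and `content` may be passed in place of a file.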
This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_openshift_api/library/oc_secrets.py b/roles/lib_openshift_api/library/oc_secret.py index 841c14692..96a0f1db1 100644 --- a/roles/lib_openshift_api/library/oc_secrets.py +++ b/roles/lib_openshift_api/library/oc_secret.py @@ -1,72 +1,45 @@ #!/usr/bin/env python ''' -module for openshift cloud secrets + OpenShiftCLI class that wraps the oc commands in a subprocess ''' -# Examples: -# -# # to initiate and use /etc/origin/master/admin.kubeconfig file for auth -# - name: list secrets -# oc_secrets: -# state: list -# namespace: default -# -# # To get a specific secret named 'mysecret' -# - name: list secrets -# oc_secrets: -# state: list -# namespace: default -# name: mysecret -# -# # To create a secret: -# # This module expects the user to place the files on the remote server and pass them in. -# - name: create a secret from file -# oc_secrets: -# state: present -# namespace: default -# name: mysecret -# files: -# - /tmp/config.yml -# - /tmp/passwords.yml -# delete_after: False - -# # To create a secret: -# # This module expects the user to place the files on the remote server and pass them in. -# - name: create a secret from content -# oc_secrets: -# state: present -# namespace: default -# name: mysecret -# contents: -# - path: /tmp/config.yml -# content: "value=True\n" -# - path: /tmp/passwords.yml -# content: "test1\ntest2\ntest3\ntest4\n" -# - +import atexit +import json import os import shutil -import json -import atexit +import subprocess +import yaml -class OpenShiftOC(object): - ''' Class to wrap the oc command line tools - ''' +class OpenShiftCLI(object): + ''' Class to wrap the oc command line tools ''' def __init__(self, namespace, - secret_name=None, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False): ''' Constructor for OpenshiftOC ''' self.namespace = namespace - self.name = secret_name self.verbose = verbose self.kubeconfig = kubeconfig - def get_secrets(self): + def replace(self, fname, force=False): + '''return all pods ''' + cmd = ['replace', '-f', fname] + if force: + cmd = ['replace', '--force', '-f', fname] + return self.oc_cmd(cmd) + + def create(self, fname): + '''return all pods ''' + return self.oc_cmd(['create', '-f', fname, '-n', self.namespace]) + + def delete(self, resource, rname): + '''return all pods ''' + return self.oc_cmd(['delete', resource, rname, '-n', self.namespace]) + + def get(self, resource, rname=None): '''return a secret by name ''' - cmd = ['get', 'secrets', '-o', 'json', '-n', self.namespace] - if self.name: - cmd.append(self.name) + cmd = ['get', resource, '-o', 'json', '-n', self.namespace] + if rname: + cmd.append(rname) rval = self.oc_cmd(cmd, output=True) @@ -78,65 +51,9 @@ class OpenShiftOC(object): return rval - def delete_secret(self): - '''return all pods ''' - return self.oc_cmd(['delete', 'secrets', self.name, '-n', self.namespace]) - - def secret_new(self, files): - '''Create a secret with all pods ''' - secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files] - cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name] - cmd.extend(secrets) - - return self.oc_cmd(cmd) - - @staticmethod - def create_files_from_contents(data): - '''Turn an array of dict: filename, content into a files array''' - files = [] - for sfile in data: - with open(sfile['path'], 'w') as fds: - fds.write(sfile['content']) - files.append(sfile['path']) - - # Register cleanup when module is done - atexit.register(OpenShiftOC.cleanup, files) - 
return files - - def update_secret(self, files, force=False): - '''run update secret - - This receives a list of file names and converts it into a secret. - The secret is then written to disk and passed into the `oc replace` command. - ''' - secret = self.prep_secret(files) - if secret['returncode'] != 0: - return secret - - sfile_path = '/tmp/%s' % secret['results']['metadata']['name'] - with open(sfile_path, 'w') as sfd: - sfd.write(json.dumps(secret['results'])) - - cmd = ['replace', '-f', sfile_path] - if force: - cmd = ['replace', '--force', '-f', sfile_path] - - atexit.register(OpenShiftOC.cleanup, [sfile_path]) - - return self.oc_cmd(cmd) - - def prep_secret(self, files): - ''' return what the secret would look like if created - This is accomplished by passing -ojson. This will most likely change in the future - ''' - secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files] - cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name] - cmd.extend(secrets) - - return self.oc_cmd(cmd, output=True) - def oc_cmd(self, cmd, output=False): '''Base command for oc ''' + #cmds = ['/usr/bin/oc', '--config', self.kubeconfig] cmds = ['/usr/bin/oc'] cmds.extend(cmd) @@ -171,6 +88,36 @@ class OpenShiftOC(object): "results": {} } +class Utils(object): + ''' utilities for openshiftcli modules ''' + @staticmethod + def create_file(rname, data, ftype=None): + ''' create a file in tmp with name and contents''' + path = os.path.join('/tmp', rname) + with open(path, 'w') as fds: + if ftype == 'yaml': + fds.write(yaml.dump(data, default_flow_style=False)) + + elif ftype == 'json': + fds.write(json.dumps(data)) + else: + fds.write(data) + + # Register cleanup when module is done + atexit.register(Utils.cleanup, [path]) + return path + + @staticmethod + def create_files_from_contents(data): + '''Turn an array of dict: filename, content into a files array''' + files = [] + + for sfile in data: + path = Utils.create_file(sfile['path'], sfile['content']) + files.append(path) + + return files + @staticmethod def cleanup(files): '''Clean up on exit ''' @@ -182,83 +129,167 @@ class OpenShiftOC(object): os.remove(sfile) -def exists(results, _name): - ''' Check to see if the results include the name ''' - if not results: + @staticmethod + def exists(results, _name): + ''' Check to see if the results include the name ''' + if not results: + return False + + + if Utils.find_result(results, _name): + return True + return False - if find_result(results, _name): + @staticmethod + def find_result(results, _name): + ''' Find the specified result by name''' + rval = None + for result in results: + if result.has_key('metadata') and result['metadata']['name'] == _name: + rval = result + break + + return rval + + @staticmethod + def get_resource_file(sfile, sfile_type='yaml'): + ''' return the service file ''' + contents = None + with open(sfile) as sfd: + contents = sfd.read() + + if sfile_type == 'yaml': + contents = yaml.load(contents) + elif sfile_type == 'json': + contents = json.loads(contents) + + return contents + + # Disabling too-many-branches. This is a yaml dictionary comparison function + # pylint: disable=too-many-branches,too-many-return-statements + @staticmethod + def check_def_equal(user_def, result_def, debug=False): + ''' Given a user defined definition, compare it with the results given back by our query. 
''' + + # Currently these values are autogenerated and we do not need to check them + skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace'] + + for key, value in result_def.items(): + if key in skip: + continue + + # Both are lists + if isinstance(value, list): + if not isinstance(user_def[key], list): + return False + + # lists should be identical + if value != user_def[key]: + return False + + # recurse on a dictionary + elif isinstance(value, dict): + if not isinstance(user_def[key], dict): + if debug: + print "dict returned false not instance of dict" + return False + + # before passing ensure keys match + api_values = set(value.keys()) - set(skip) + user_values = set(user_def[key].keys()) - set(skip) + if api_values != user_values: + if debug: + print api_values + print user_values + print "keys are not equal in dict" + return False + + result = Utils.check_def_equal(user_def[key], value, debug=debug) + if not result: + if debug: + print "dict returned false" + return False + + # Verify each key, value pair is the same + else: + if not user_def.has_key(key) or value != user_def[key]: + if debug: + print "value not equal; user_def does not have key" + print value + print user_def[key] + return False + return True - return False - -def find_result(results, _name): - ''' Find the specified result by name''' - rval = None - for result in results: - #print "%s == %s" % (result['metadata']['name'], name) - if result.has_key('metadata') and result['metadata']['name'] == _name: - rval = result - break - - return rval - -# Disabling too-many-branches. This is a yaml dictionary comparison function -# pylint: disable=too-many-branches,too-many-return-statements -def check_def_equal(user_def, result_def, debug=False): - ''' Given a user defined definition, compare it with the results given back by our query. 
''' - - # Currently these values are autogenerated and we do not need to check them - skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace'] - - for key, value in result_def.items(): - if key in skip: - continue - - # Both are lists - if isinstance(value, list): - if not isinstance(user_def[key], list): - return False - - # lists should be identical - if value != user_def[key]: - return False - - # recurse on a dictionary - elif isinstance(value, dict): - if not isinstance(user_def[key], dict): - if debug: - print "dict returned false not instance of dict" - return False - - # before passing ensure keys match - api_values = set(value.keys()) - set(skip) - user_values = set(user_def[key].keys()) - set(skip) - if api_values != user_values: - if debug: - print api_values - print user_values - print "keys are not equal in dict" - return False - - result = check_def_equal(user_def[key], value) - if not result: - if debug: - print "dict returned false" - return False - - # Verify each key, value pair is the same - else: - if not user_def.has_key(key) or value != user_def[key]: - if debug: - print "value not equal; user_def does not have key" - print value - print user_def[key] - return False +class Secret(OpenShiftCLI): + ''' Class to wrap the oc command line tools + ''' + def __init__(self, + namespace, + secret_name=None, + kubeconfig='/etc/origin/master/admin.kubeconfig', + verbose=False): + ''' Constructor for OpenshiftOC ''' + super(Secret, self).__init__(namespace, kubeconfig) + self.namespace = namespace + self.name = secret_name + self.kubeconfig = kubeconfig + self.verbose = verbose + + def get_secrets(self): + '''return a secret by name ''' + return self.get('secrets', self.name) + + def delete_secret(self): + '''return all pods ''' + return self.delete('secrets', self.name) + + def secret_new(self, files=None, contents=None): + '''Create a secret with all pods ''' + if not files: + files = Utils.create_files_from_contents(contents) + + secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files] + cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name] + cmd.extend(secrets) + + return self.oc_cmd(cmd) + + def update_secret(self, files, force=False): + '''run update secret + + This receives a list of file names and converts it into a secret. + The secret is then written to disk and passed into the `oc replace` command. + ''' + secret = self.prep_secret(files) + if secret['returncode'] != 0: + return secret + + sfile_path = '/tmp/%s' % self.name + with open(sfile_path, 'w') as sfd: + sfd.write(json.dumps(secret['results'])) + + atexit.register(Utils.cleanup, [sfile_path]) + + return self.replace(sfile_path, force=force) + + def prep_secret(self, files=None, contents=None): + ''' return what the secret would look like if created + This is accomplished by passing -ojson. 
This will most likely change in the future + ''' + if not files: + files = Utils.create_files_from_contents(contents) + + secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files] + cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name] + cmd.extend(secrets) + + return self.oc_cmd(cmd, output=True) - return True +# pylint: disable=too-many-branches def main(): ''' ansible oc module for secrets @@ -281,10 +312,10 @@ def main(): supports_check_mode=True, ) - occmd = OpenShiftOC(module.params['namespace'], - module.params['name'], - kubeconfig=module.params['kubeconfig'], - verbose=module.params['debug']) + occmd = Secret(module.params['namespace'], + module.params['name'], + kubeconfig=module.params['kubeconfig'], + verbose=module.params['debug']) state = module.params['state'] @@ -302,7 +333,7 @@ def main(): # Delete ######## if state == 'absent': - if not exists(api_rval['results'], module.params['name']): + if not Utils.exists(api_rval['results'], module.params['name']): module.exit_json(changed=False, state="absent") if module.check_mode: @@ -316,39 +347,39 @@ def main(): if module.params['files']: files = module.params['files'] elif module.params['contents']: - files = OpenShiftOC.create_files_from_contents(module.params['contents']) + files = Utils.create_files_from_contents(module.params['contents']) else: module.fail_json(msg='Either specify files or contents.') ######## # Create ######## - if not exists(api_rval['results'], module.params['name']): + if not Utils.exists(api_rval['results'], module.params['name']): if module.check_mode: module.exit_json(change=False, msg='Would have performed a create.') - api_rval = occmd.secret_new(files) + api_rval = occmd.secret_new(module.params['files'], module.params['contents']) # Remove files if files and module.params['delete_after']: - OpenShiftOC.cleanup(files) + Utils.cleanup(files) module.exit_json(changed=True, results=api_rval, state="present") ######## # Update ######## - secret = occmd.prep_secret(files) + secret = occmd.prep_secret(module.params['files'], module.params['contents']) if secret['returncode'] != 0: module.fail_json(msg=secret) - if check_def_equal(secret['results'], api_rval['results'][0]): + if Utils.check_def_equal(secret['results'], api_rval['results'][0]): # Remove files if files and module.params['delete_after']: - OpenShiftOC.cleanup(files) + Utils.cleanup(files) module.exit_json(changed=False, results=secret['results'], state="present") @@ -358,8 +389,8 @@ def main(): api_rval = occmd.update_secret(files, force=module.params['force']) # Remove files - if files and module.params['delete_after']: - OpenShiftOC.cleanup(files) + if secret and module.params['delete_after']: + Utils.cleanup(files) if api_rval['returncode'] != 0: module.fail_json(msg=api_rval) diff --git a/roles/lib_openshift_api/library/oc_service.py b/roles/lib_openshift_api/library/oc_service.py new file mode 100644 index 000000000..e7bd2514e --- /dev/null +++ b/roles/lib_openshift_api/library/oc_service.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python +''' + OpenShiftCLI class that wraps the oc commands in a subprocess +''' +import atexit +import json +import os +import shutil +import subprocess +import yaml + +class OpenShiftCLI(object): + ''' Class to wrap the oc command line tools ''' + def __init__(self, + namespace, + kubeconfig='/etc/origin/master/admin.kubeconfig', + verbose=False): + ''' Constructor for OpenshiftOC ''' + self.namespace = namespace + self.verbose = verbose + self.kubeconfig = kubeconfig + + def 
replace(self, fname, force=False): + '''return all pods ''' + cmd = ['replace', '-f', fname] + if force: + cmd = ['replace', '--force', '-f', fname] + return self.oc_cmd(cmd) + + def create(self, fname): + '''return all pods ''' + return self.oc_cmd(['create', '-f', fname, '-n', self.namespace]) + + def delete(self, resource, rname): + '''return all pods ''' + return self.oc_cmd(['delete', resource, rname, '-n', self.namespace]) + + def get(self, resource, rname=None): + '''return a secret by name ''' + cmd = ['get', resource, '-o', 'json', '-n', self.namespace] + if rname: + cmd.append(rname) + + rval = self.oc_cmd(cmd, output=True) + + # Ensure results are retuned in an array + if rval.has_key('items'): + rval['results'] = rval['items'] + elif not isinstance(rval['results'], list): + rval['results'] = [rval['results']] + + return rval + + def oc_cmd(self, cmd, output=False): + '''Base command for oc ''' + #cmds = ['/usr/bin/oc', '--config', self.kubeconfig] + cmds = ['/usr/bin/oc'] + cmds.extend(cmd) + + results = '' + + if self.verbose: + print ' '.join(cmds) + + proc = subprocess.Popen(cmds, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env={'KUBECONFIG': self.kubeconfig}) + proc.wait() + if proc.returncode == 0: + if output: + try: + results = json.loads(proc.stdout.read()) + except ValueError as err: + if "No JSON object could be decoded" in err.message: + results = err.message + + if self.verbose: + print proc.stderr.read() + print results + print + + return {"returncode": proc.returncode, "results": results} + + return {"returncode": proc.returncode, + "stderr": proc.stderr.read(), + "stdout": proc.stdout.read(), + "results": {} + } + +class Utils(object): + ''' utilities for openshiftcli modules ''' + @staticmethod + def create_file(rname, data, ftype=None): + ''' create a file in tmp with name and contents''' + path = os.path.join('/tmp', rname) + with open(path, 'w') as fds: + if ftype == 'yaml': + fds.write(yaml.dump(data, default_flow_style=False)) + + elif ftype == 'json': + fds.write(json.dumps(data)) + else: + fds.write(data) + + # Register cleanup when module is done + atexit.register(Utils.cleanup, [path]) + return path + + @staticmethod + def create_files_from_contents(data): + '''Turn an array of dict: filename, content into a files array''' + files = [] + + for sfile in data: + path = Utils.create_file(sfile['path'], sfile['content']) + files.append(path) + + return files + + @staticmethod + def cleanup(files): + '''Clean up on exit ''' + for sfile in files: + if os.path.exists(sfile): + if os.path.isdir(sfile): + shutil.rmtree(sfile) + elif os.path.isfile(sfile): + os.remove(sfile) + + + @staticmethod + def exists(results, _name): + ''' Check to see if the results include the name ''' + if not results: + return False + + + if Utils.find_result(results, _name): + return True + + return False + + @staticmethod + def find_result(results, _name): + ''' Find the specified result by name''' + rval = None + for result in results: + if result.has_key('metadata') and result['metadata']['name'] == _name: + rval = result + break + + return rval + + @staticmethod + def get_resource_file(sfile, sfile_type='yaml'): + ''' return the service file ''' + contents = None + with open(sfile) as sfd: + contents = sfd.read() + + if sfile_type == 'yaml': + contents = yaml.load(contents) + elif sfile_type == 'json': + contents = json.loads(contents) + + return contents + + # Disabling too-many-branches. 
This is a yaml dictionary comparison function + # pylint: disable=too-many-branches,too-many-return-statements + @staticmethod + def check_def_equal(user_def, result_def, debug=False): + ''' Given a user defined definition, compare it with the results given back by our query. ''' + + # Currently these values are autogenerated and we do not need to check them + skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace'] + + for key, value in result_def.items(): + if key in skip: + continue + + # Both are lists + if isinstance(value, list): + if not isinstance(user_def[key], list): + return False + + # lists should be identical + if value != user_def[key]: + return False + + # recurse on a dictionary + elif isinstance(value, dict): + if not isinstance(user_def[key], dict): + if debug: + print "dict returned false not instance of dict" + return False + + # before passing ensure keys match + api_values = set(value.keys()) - set(skip) + user_values = set(user_def[key].keys()) - set(skip) + if api_values != user_values: + if debug: + print api_values + print user_values + print "keys are not equal in dict" + return False + + result = Utils.check_def_equal(user_def[key], value, debug=debug) + if not result: + if debug: + print "dict returned false" + return False + + # Verify each key, value pair is the same + else: + if not user_def.has_key(key) or value != user_def[key]: + if debug: + print "value not equal; user_def does not have key" + print value + print user_def[key] + return False + + return True + +class Service(OpenShiftCLI): + ''' Class to wrap the oc command line tools + ''' + def __init__(self, + namespace, + service_name=None, + kubeconfig='/etc/origin/master/admin.kubeconfig', + verbose=False): + ''' Constructor for OpenshiftOC ''' + super(Service, self).__init__(namespace, kubeconfig) + self.namespace = namespace + self.name = service_name + self.verbose = verbose + self.kubeconfig = kubeconfig + + def create_service(self, sfile): + ''' create the service ''' + return self.create(sfile) + + def get_services(self): + '''return a secret by name ''' + return self.get('services', self.name) + + def delete_service(self): + '''return all pods ''' + return self.delete('service', self.name) + + def update_service(self, sfile, force=False): + '''run update service + + This receives a list of file names and converts it into a secret. + The secret is then written to disk and passed into the `oc replace` command. 
+ ''' + return self.replace(sfile, force=force) + + +# pylint: disable=too-many-branches +def main(): + ''' + ansible oc module for services + ''' + + module = AnsibleModule( + argument_spec=dict( + kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), + state=dict(default='present', type='str', + choices=['present', 'absent', 'list']), + debug=dict(default=False, type='bool'), + namespace=dict(default='default', type='str'), + name=dict(default=None, type='str'), + service_file=dict(default=None, type='str'), + input_type=dict(default='yaml', + choices=['json', 'yaml'], + type='str'), + delete_after=dict(default=False, type='bool'), + contents=dict(default=None, type='list'), + force=dict(default=False, type='bool'), + ), + mutually_exclusive=[["contents", "service_file"]], + + supports_check_mode=True, + ) + occmd = Service(module.params['namespace'], + module.params['name'], + kubeconfig=module.params['kubeconfig'], + verbose=module.params['debug']) + + state = module.params['state'] + + api_rval = occmd.get_services() + + ##### + # Get + ##### + if state == 'list': + module.exit_json(changed=False, results=api_rval['results'], state="list") + + if not module.params['name']: + module.fail_json(msg='Please specify a name when state is absent|present.') + ######## + # Delete + ######## + if state == 'absent': + if not Utils.exists(api_rval['results'], module.params['name']): + module.exit_json(changed=False, state="absent") + + if module.check_mode: + module.exit_json(change=False, msg='Would have performed a delete.') + + api_rval = occmd.delete_service() + module.exit_json(changed=True, results=api_rval, state="absent") + + + if state == 'present': + if module.params['service_file']: + sfile = module.params['service_file'] + elif module.params['contents']: + sfile = Utils.create_files_from_contents(module.params['contents']) + else: + module.fail_json(msg='Either specify files or contents.') + + ######## + # Create + ######## + if not Utils.exists(api_rval['results'], module.params['name']): + + if module.check_mode: + module.exit_json(change=False, msg='Would have performed a create.') + + api_rval = occmd.create_service(sfile) + + # Remove files + if sfile and module.params['delete_after']: + Utils.cleanup([sfile]) + + module.exit_json(changed=True, results=api_rval, state="present") + + ######## + # Update + ######## + sfile_contents = Utils.get_resource_file(sfile, module.params['input_type']) + if Utils.check_def_equal(sfile_contents, api_rval['results'][0]): + + # Remove files + if module.params['service_file'] and module.params['delete_after']: + Utils.cleanup([sfile]) + + module.exit_json(changed=False, results=api_rval['results'][0], state="present") + + if module.check_mode: + module.exit_json(change=False, msg='Would have performed an update.') + + api_rval = occmd.update_service(sfile, force=module.params['force']) + + # Remove files + if sfile and module.params['delete_after']: + Utils.cleanup([sfile]) + + if api_rval['returncode'] != 0: + module.fail_json(msg=api_rval) + + + module.exit_json(changed=True, results=api_rval, state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. 
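A hedged usage sketch for the new service module (again, not taken from the repository's own docs; the name and path are hypothetical), using only parameters from the argument_spec above:

    - name: ensure a service is present
      oc_service:
        state: present
        namespace: default
        name: docker-registry                 # hypothetical service name
        service_file: /tmp/registry-svc.yml   # hypothetical path, pre-placed on the host
        delete_after: True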
This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_yaml_editor/library/yedit.py b/roles/lib_yaml_editor/library/yedit.py new file mode 100644 index 000000000..9b565d0c7 --- /dev/null +++ b/roles/lib_yaml_editor/library/yedit.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python +''' +module for openshift cloud secrets +''' + +import os +import yaml + +class YeditException(Exception): + ''' Exception class for Yedit ''' + pass + +class Yedit(object): + ''' Class to modify yaml files ''' + + def __init__(self, filename): + self.filename = filename + self.__yaml_dict = None + self.get() + + @property + def yaml_dict(self): + ''' getter method for yaml_dict ''' + return self.__yaml_dict + + @yaml_dict.setter + def yaml_dict(self, value): + ''' setter method for yaml_dict ''' + self.__yaml_dict = value + + @staticmethod + def remove_entry(data, keys): + ''' remove an item from a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + item = c + ''' + if "." in keys: + key, rest = keys.split(".", 1) + if key in data.keys(): + Yedit.remove_entry(data[key], rest) + else: + del data[keys] + + @staticmethod + def add_entry(data, keys, item): + ''' Add an item to a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + item = c + ''' + if "." in keys: + key, rest = keys.split(".", 1) + if key not in data: + data[key] = {} + + if not isinstance(data, dict): + raise YeditException('Invalid add_entry called on a [%s] of type [%s].' % (data, type(data))) + else: + Yedit.add_entry(data[key], rest, item) + + else: + data[keys] = item + + + @staticmethod + def get_entry(data, keys): + ''' Get an item from a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + return c + ''' + if keys and "." in keys: + key, rest = keys.split(".", 1) + if not isinstance(data[key], dict): + raise YeditException('Invalid get_entry called on a [%s] of type [%s].' 
% (data, type(data))) + + else: + return Yedit.get_entry(data[key], rest) + + else: + return data.get(keys, None) + + + def write(self): + ''' write to file ''' + with open(self.filename, 'w') as yfd: + yfd.write(yaml.dump(self.yaml_dict, default_flow_style=False)) + + def read(self): + ''' write to file ''' + # check if it exists + if not self.exists(): + return None + + contents = None + with open(self.filename) as yfd: + contents = yfd.read() + + return contents + + def exists(self): + ''' return whether file exists ''' + if os.path.exists(self.filename): + return True + + return False + def get(self): + ''' return yaml file ''' + contents = self.read() + + if not contents: + return None + + # check if it is yaml + try: + self.yaml_dict = yaml.load(contents) + except yaml.YAMLError as _: + # Error loading yaml + return None + + return self.yaml_dict + + def delete(self, key): + ''' put key, value into a yaml file ''' + try: + entry = Yedit.get_entry(self.yaml_dict, key) + except KeyError as _: + entry = None + if not entry: + return (False, self.yaml_dict) + + Yedit.remove_entry(self.yaml_dict, key) + self.write() + return (True, self.get()) + + def put(self, key, value): + ''' put key, value into a yaml file ''' + try: + entry = Yedit.get_entry(self.yaml_dict, key) + except KeyError as _: + entry = None + + if entry == value: + return (False, self.yaml_dict) + + Yedit.add_entry(self.yaml_dict, key, value) + self.write() + return (True, self.get()) + + def create(self, key, value): + ''' create the file ''' + if not self.exists(): + self.yaml_dict = {key: value} + self.write() + return (True, self.get()) + + return (False, self.get()) + + +def main(): + ''' + ansible oc module for secrets + ''' + + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', type='str', + choices=['present', 'absent', 'list']), + debug=dict(default=False, type='bool'), + + src=dict(default=None, type='str'), + key=dict(default=None, type='str'), + value=dict(default=None, type='str'), + value_format=dict(default='yaml', choices=['yaml', 'json'], type='str'), + ), + mutually_exclusive=[["contents", "files"]], + + supports_check_mode=True, + ) + state = module.params['state'] + + yamlfile = Yedit(module.params['src']) + + rval = yamlfile.get() + if not rval and state != 'present': + module.fail_json(msg='Error opening file [%s]. Verify that the' + \ + ' file exists, that it is has correct permissions, and is valid yaml.') + + if state == 'list': + module.exit_json(changed=False, results=rval, state="list") + + if state == 'absent': + rval = yamlfile.delete(module.params['key']) + module.exit_json(changed=rval[0], results=rval[1], state="absent") + + if state == 'present': + + if module.params['value_format'] == 'yaml': + value = yaml.load(module.params['value']) + elif module.params['value_format'] == 'json': + value = json.loads(module.params['value']) + + if rval: + rval = yamlfile.put(module.params['key'], value) + module.exit_json(changed=rval[0], results=rval[1], state="present") + + rval = yamlfile.create(module.params['key'], value) + module.exit_json(changed=rval[0], results=rval[1], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. 
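By way of illustration (the file name and values here are assumed, not repository documentation), the new yedit module addresses nested keys with the dotted notation implemented by the add_entry/get_entry helpers above:

    - name: set a nested yaml key
      yedit:
        src: /tmp/example.yaml   # hypothetical file
        key: a.b.c               # dotted path into nested dicts
        value: d
        state: present

`state=list` returns the parsed document and `state=absent` removes the key.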
This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 263daf210..30e29787a 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1407,6 +1407,7 @@ class OpenShiftFacts(object): if 'node' in roles: defaults['node'] = dict(labels={}, annotations={}, iptables_sync_period='5s', + local_quota_per_fsgroup="", set_node_ip=False) if 'docker' in roles: diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index d2ff1b4b7..2a651df65 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -18,7 +18,7 @@ failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0" changed_when: osmiq_create_mi_project.rc == 0 -- name: Create Service Account +- name: Create Admin Service Account shell: > echo {{ manageiq_service_account | to_json | quote }} | {{ openshift.common.client_binary }} create @@ -29,6 +29,17 @@ failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0" changed_when: osmiq_create_service_account.rc == 0 +- name: Create Image Inspector Service Account + shell: > + echo {{ manageiq_image_inspector_service_account | to_json | quote }} | + {{ openshift.common.client_binary }} create + -n management-infra + --config={{manage_iq_tmp_conf}} + -f - + register: osmiq_create_service_account + failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0" + changed_when: osmiq_create_service_account.rc == 0 + - name: Create Cluster Role shell: > echo {{ manageiq_cluster_role | to_json | quote }} | diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 77e1c304b..69ee2cb4c 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -15,6 +15,12 @@ manageiq_service_account: metadata: name: management-admin +manageiq_image_inspector_service_account: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: inspector-admin + manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig manage_iq_tasks: @@ -22,3 +28,5 @@ manage_iq_tasks: - policy add-role-to-user -n management-infra management-infra-admin -z management-admin - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin + - policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin + - policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 4b5832ab7..ca1e26459 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -31,6 +31,7 @@ node_image: "{{ osn_image | default(None) }}" ovs_image: "{{ osn_ovs_image | default(None) }}" proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" + local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. 
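With the new `openshift_node_local_quota_per_fsgroup` inventory variable set (the `512Mi` here is purely an example value), the fact added above flows through to the node template changed in the diff that follows, rendering a node-config fragment like:

    volumeConfig:
      localQuota:
        perFSGroup: 512Mi

Leaving the variable unset keeps the default empty string from openshift_facts, so `perFSGroup` renders empty and the previous behavior is preserved.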
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 67975d372..28cb1ea26 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -38,3 +38,6 @@ volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
 proxyArguments:
   proxy-mode:
      - {{ openshift.node.proxy_mode }}
+volumeConfig:
+  localQuota:
+    perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 53b1d6230..65d2291bb 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -12,7 +12,7 @@ Wants={{ openshift.common.service_type }}-master.service
 [Service]
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev {{ openshift.node.node_image }}:${IMAGE_VERSION}
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index f34fa7b74..5dd28d52a 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -9,7 +9,8 @@
 - name: create the service account
   shell: >
     echo {{ lookup('template', '../templates/serviceaccount.j2')
-      | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }} create -f -
+      | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }}
+      -n {{ openshift_serviceaccounts_namespace }} create -f -
   when: item.1.rc != 0
   with_together:
     - openshift_serviceaccounts_names
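
One subtlety in the `volumeConfig` addition above: `local_quota_per_fsgroup` defaults to an empty string, so the rendered `perFSGroup` value can come out empty. A quick sketch of both cases, assuming jinja2 and PyYAML and using a simplified variable name:

```python
# Illustrative only: what the volumeConfig stanza renders to with a set
# and an unset quota; an empty render parses as a YAML null (None).
import yaml
from jinja2 import Template

stanza = Template(
    'volumeConfig:\n'
    '  localQuota:\n'
    '    perFSGroup: {{ quota }}\n')

print(yaml.load(stanza.render(quota='512Mi')))
# -> {'volumeConfig': {'localQuota': {'perFSGroup': '512Mi'}}}
print(yaml.load(stanza.render(quota='')))
# -> {'volumeConfig': {'localQuota': {'perFSGroup': None}}}
```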
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 705066b35..1f2117b2c 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -93,6 +93,18 @@ g_template_openshift_master:
     applications:
     - Openshift Master
 
+  - key: openshift.master.pv.space.total
+    description: Total space of the persistent volumes
+    value_type: int
+    applications:
+    - Openshift Master
+
+  - key: openshift.master.pv.space.available
+    description: Available space of the persistent volumes
+    value_type: int
+    applications:
+    - Openshift Master
+
   - key: openshift.master.pv.total.count
     description: Total number of Persistent Volumes in the Openshift Cluster
     value_type: int
@@ -279,6 +291,29 @@ g_template_openshift_master:
     applications:
     - Openshift Master Metrics
 
+  zdiscoveryrules:
+  - name: disc.pv
+    key: disc.pv
+    lifetime: 1
+    description: "Dynamically register the Persistent Volumes"
+
+  zitemprototypes:
+  - discoveryrule_key: disc.pv
+    name: "disc.pv.count.{#OSO_PV}"
+    key: "disc.pv.count[{#OSO_PV}]"
+    value_type: int
+    description: "Number of PVs of this size"
+    applications:
+    - Openshift Master
+
+  - discoveryrule_key: disc.pv
+    name: "disc.pv.count.available.{#OSO_PV}"
+    key: "disc.pv.count.available[{#OSO_PV}]"
+    value_type: int
+    description: "Number of PVs of this size that are available"
+    applications:
+    - Openshift Master
+
   ztriggers:
   - name: 'Openshift Master process not running on {HOST.NAME}'
     expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'
diff --git a/test/units/yedit_test.py b/test/units/yedit_test.py
new file mode 100755
index 000000000..cdd2d2b59
--- /dev/null
+++ b/test/units/yedit_test.py
@@ -0,0 +1,236 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for yedit
+'''
+
+import unittest
+import os
+import yaml
+
+class YeditException(Exception):
+    ''' Exception class for Yedit '''
+    pass
+
+class Yedit(object):
+    ''' Class to modify yaml files '''
+
+    def __init__(self, filename):
+        self.filename = filename
+        self.__yaml_dict = None
+        self.get()
+
+    @property
+    def yaml_dict(self):
+        ''' get property for yaml_dict '''
+        return self.__yaml_dict
+
+    @yaml_dict.setter
+    def yaml_dict(self, value):
+        ''' setter method for yaml_dict '''
+        self.__yaml_dict = value
+
+    @staticmethod
+    def remove_entry(data, keys):
+        ''' remove an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            keys = a.b
+            item = c
+        '''
+        if "." in keys:
+            key, rest = keys.split(".", 1)
+            if key in data.keys():
+                Yedit.remove_entry(data[key], rest)
+        else:
+            del data[keys]
+
+    @staticmethod
+    def add_entry(data, keys, item):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            keys = a.b
+            item = c
+        '''
+        if "." in keys:
+            key, rest = keys.split(".", 1)
+            if key not in data:
+                data[key] = {}
+
+            if not isinstance(data[key], dict):
+                raise YeditException('Invalid add_entry called on data [%s].' % data)
+            else:
+                Yedit.add_entry(data[key], rest, item)
+
+        else:
+            data[keys] = item
+
+    @staticmethod
+    def get_entry(data, keys):
+        ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            keys = a.b
+            return c
+        '''
+        if keys and "." in keys:
+            key, rest = keys.split(".", 1)
+            if not isinstance(data[key], dict):
+                raise YeditException('Invalid get_entry called on a [%s] of type [%s].'
+                                     % (data, type(data)))
+
+            else:
+                return Yedit.get_entry(data[key], rest)
+
+        else:
+            return data.get(keys, None)
+
+    def write(self):
+        ''' write the yaml_dict to the file '''
+        with open(self.filename, 'w') as yfd:
+            yfd.write(yaml.dump(self.yaml_dict, default_flow_style=False))
+
+    def read(self):
+        ''' read from file '''
+        # check if it exists
+        if not self.exists():
+            return None
+
+        contents = None
+        with open(self.filename) as yfd:
+            contents = yfd.read()
+
+        return contents
+
+    def exists(self):
+        ''' return whether file exists '''
+        if os.path.exists(self.filename):
+            return True
+
+        return False
+
+    def get(self):
+        ''' return the parsed contents of the yaml file '''
+        contents = self.read()
+
+        if not contents:
+            return None
+
+        # check if it is yaml
+        try:
+            self.yaml_dict = yaml.load(contents)
+        except yaml.YAMLError as _:
+            # Error loading yaml
+            return None
+
+        return self.yaml_dict
+
+    def delete(self, key):
+        ''' remove key from the yaml file '''
+        try:
+            entry = Yedit.get_entry(self.yaml_dict, key)
+        except KeyError as _:
+            entry = None
+        if not entry:
+            return (False, self.yaml_dict)
+
+        Yedit.remove_entry(self.yaml_dict, key)
+        self.write()
+        return (True, self.get())
+
+    def put(self, key, value):
+        ''' put key, value into the yaml file '''
+        try:
+            entry = Yedit.get_entry(self.yaml_dict, key)
+        except KeyError as _:
+            entry = None
+
+        if entry == value:
+            return (False, self.yaml_dict)
+
+        Yedit.add_entry(self.yaml_dict, key, value)
+        self.write()
+        return (True, self.get())
+
+    def create(self, key, value):
+        ''' create the file with key, value if it does not already exist '''
+        if not self.exists():
+            self.yaml_dict = {key: value}
+            self.write()
+            return (True, self.get())
+
+        return (False, self.get())
+
+
+# Disable the pylint invalid-name check so the short variable
+# names used in these tests can stay brief
+# pylint: disable=invalid-name
+class YeditTest(unittest.TestCase):
+    '''
+    Test class for yedit
+    '''
+    data = {'a': 'a',
+            'b': {'c': {'d': ['e', 'f', 'g']}},
+           }
+
+    filename = 'yedit_test.yml'
+
+    def setUp(self):
+        ''' setup method will create a file and set to known configuration '''
+        yed = Yedit(YeditTest.filename)
+        yed.yaml_dict = YeditTest.data
+        yed.write()
+
+    def test_get(self):
+        ''' Testing a get '''
+        yed = Yedit('yedit_test.yml')
+
+        self.assertEqual(yed.yaml_dict, self.data)
+
+    def test_write(self):
+        ''' Testing a simple write '''
+        yed = Yedit('yedit_test.yml')
+        yed.put('key1', 1)
+        yed.write()
+        yed.get()
+        self.assertTrue(yed.yaml_dict.has_key('key1'))
+        self.assertEqual(yed.yaml_dict['key1'], 1)
+
+    def test_write_x_y_z(self):
+        '''Testing a write of multilayer key'''
+        yed = Yedit('yedit_test.yml')
+        yed.put('x.y.z', 'modified')
+        yed.write()
+        self.assertEqual(Yedit.get_entry(yed.get(), 'x.y.z'), 'modified')
+
+    def test_delete_a(self):
+        '''Testing a simple delete '''
+        yed = Yedit('yedit_test.yml')
+        yed.delete('a')
+        yed.write()
+        yed.get()
+        self.assertTrue(not yed.yaml_dict.has_key('a'))
+
+    def test_delete_b_c(self):
+        '''Testing delete of layered key '''
+        yed = Yedit('yedit_test.yml')
+        yed.delete('b.c')
+        yed.write()
+        yed.get()
+        self.assertTrue(yed.yaml_dict.has_key('b'))
+        self.assertFalse(yed.yaml_dict['b'].has_key('c'))
+
+    def test_create(self):
+        '''Testing a create '''
+        os.unlink(YeditTest.filename)
+        yed = Yedit('yedit_test.yml')
+        yed.create('foo', 'bar')
+        yed.write()
+        yed.get()
+        self.assertTrue(yed.yaml_dict.has_key('foo'))
+        self.assertEqual(yed.yaml_dict['foo'], 'bar')
+
+    def tearDown(self):
+        '''TearDown method'''
+        os.unlink(YeditTest.filename)
+
+if __name__ == "__main__":
+    unittest.main()
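
For context on the `disc.pv` discovery rule added above: Zabbix low-level discovery consumes a JSON document of macro/value pairs, which the item prototypes then expand into concrete items. A hedged sketch of such a payload follows; the `{#OSO_PV}` macro name comes from the template, while the PV sizes are made-up sample data.

```python
# Illustrative only: the shape of a Zabbix low-level discovery payload
# that the disc.pv rule would consume; PV sizes are sample data.
import json

pv_sizes = ['1Gi', '5Gi', '10Gi']

discovery = {'data': [{'{#OSO_PV}': size} for size in pv_sizes]}
print(json.dumps(discovery))
# -> {"data": [{"{#OSO_PV}": "1Gi"}, {"{#OSO_PV}": "5Gi"}, {"{#OSO_PV}": "10Gi"}]}
```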