Diffstat:
 -rw-r--r--   .tito/packages/openshift-ansible                                  2
 -rwxr-xr-x   inventory/multi_inventory.py                                     30
 -rw-r--r--   openshift-ansible.spec                                           16
 -rw-r--r--   playbooks/common/openshift-master/config.yml                      3
 -rw-r--r--   roles/lib_openshift_api/library/oc_secret.py                    406
              (renamed from roles/lib_openshift_api/library/oc_secrets.py)
 -rw-r--r--   roles/lib_openshift_api/library/oc_service.py (new)             356
 -rwxr-xr-x   roles/openshift_facts/library/openshift_facts.py                  1
 -rw-r--r--   roles/openshift_node/tasks/main.yml                               1
 -rw-r--r--   roles/openshift_node/templates/node.yaml.v1.j2                    3
 -rw-r--r--   roles/openshift_node/templates/openshift.docker.node.service      2
 -rw-r--r--   roles/openshift_serviceaccounts/tasks/main.yml                    3
 11 files changed, 610 insertions(+), 213 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 96b4cbcf4..f336d0770 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.62-1 ./
+3.0.64-1 ./
diff --git a/inventory/multi_inventory.py b/inventory/multi_inventory.py
index 57be259f7..be597267e 100755
--- a/inventory/multi_inventory.py
+++ b/inventory/multi_inventory.py
@@ -316,17 +316,29 @@ class MultiInventory(object):
                 inventory[key].append(name)
 
     def apply_group_selectors(self, inventory, group_selectors):
-        ''' Apply the account config for clone groups '''
+        ''' Apply the account config for group selectors '''
         _ = self # Here for pylint. wanted an instance method instead of static
-        for selector in group_selectors:
-            if inventory.has_key(selector['from_group']):
-                inventory[selector['from_group']].sort()
-                inventory[selector['name']] = inventory[selector['from_group']][0:selector['count']]
-                for host in inventory[selector['from_group']]:
-                    if host in inventory[selector['name']]:
-                        inventory['_meta']['hostvars'][host][selector['name']] = True
+        # There could be multiple clusters per account. We need to process these selectors
+        # based upon the oo_clusterid_ variable.
+        clusterids = [group for group in inventory if "oo_clusterid_" in group]
+
+        for clusterid in clusterids:
+            for selector in group_selectors:
+                if inventory.has_key(selector['from_group']):
+                    hosts = list(set(inventory[clusterid]) & set(inventory[selector['from_group']]))
+                    hosts.sort()
+
+                    # Multiple clusters in an account
+                    if inventory.has_key(selector['name']):
+                        inventory[selector['name']].extend(hosts[0:selector['count']])
                     else:
-                        inventory['_meta']['hostvars'][host][selector['name']] = False
+                        inventory[selector['name']] = hosts[0:selector['count']]
+
+                    for host in hosts:
+                        if host in inventory[selector['name']]:
+                            inventory['_meta']['hostvars'][host][selector['name']] = True
+                        else:
+                            inventory['_meta']['hostvars'][host][selector['name']] = False
 
     def apply_account_config(self, acc_config):
         ''' Apply account config settings '''
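
The reworked selector logic above intersects each selector's from_group with every oo_clusterid_* group, so each cluster contributes up to count hosts to the target group instead of the first count hosts account-wide. As a rough illustration, a selector entry in the multi-inventory account configuration might look like the sketch below; the from_group/name/count keys are taken straight from the code, while the surrounding group_selectors layout and the group names are assumptions for illustration only.

    # Hypothetical account-config snippet consumed by apply_group_selectors().
    # Keys from_group/name/count mirror selector['from_group'], selector['name'],
    # and selector['count'] in the code above; everything else is illustrative.
    group_selectors:
    - from_group: oo_hosttype_master   # pool of hosts to pick from
      name: oo_first_master            # group to (re)build
      count: 1                         # hosts taken per oo_clusterid_* group
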
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index cd090d0c4..f624b6f96 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.62
+Version: 3.0.64
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -279,6 +279,20 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Mar 23 2016 Troy Dawson <tdawson@redhat.com> 3.0.64-1
+- Latest cli updates from generated files (kwoodson@redhat.com)
+- Add /dev to node containers (sdodson@redhat.com)
+- Fix indention (whearn@redhat.com)
+- Support setting local storage perFSGroup quota in node config.
+ (dgoodwin@redhat.com)
+- Fix line break (whearn@redhat.com)
+- Lock down permissions on named certificates (elyscape@gmail.com)
+- Add namespace flag to oc create (whearn@redhat.com)
+
+* Mon Mar 21 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.63-1
+- Modified group selectors for multiple clusters per account
+ (kwoodson@redhat.com)
+
* Fri Mar 18 2016 Troy Dawson <tdawson@redhat.com> 3.0.62-1
- Yaml editor first attempt (kwoodson@redhat.com)
- libvirt cluster variables cleanup (pep@redhat.com)
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 972427c53..f1eaf8e16 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -311,13 +311,14 @@
file:
path: "{{ named_certs_dir }}"
state: directory
+ mode: 0700
when: named_certs_specified | bool
- name: Land named certificates
copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}"
with_items: openshift_master_named_certificates
when: named_certs_specified | bool
- name: Land named certificate keys
- copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}"
+ copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}" mode=0600
with_items: openshift_master_named_certificates
when: named_certs_specified | bool
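
These tasks iterate over openshift_master_named_certificates; with the added modes, named_certs_dir is created 0700 and each key file lands as 0600, so private keys are no longer world-readable. A minimal sketch of how that variable might be defined in the inventory follows; the certfile/keyfile keys match what the tasks reference, and the paths are placeholders.

    # Hedged inventory sketch (paths are placeholders):
    openshift_master_named_certificates:
    - certfile: /root/custom.example.com.crt   # copied into named_certs_dir
      keyfile: /root/custom.example.com.key    # copied with mode 0600
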
diff --git a/roles/lib_openshift_api/library/oc_secrets.py b/roles/lib_openshift_api/library/oc_secret.py
index 841c14692..d69d490ac 100644
--- a/roles/lib_openshift_api/library/oc_secrets.py
+++ b/roles/lib_openshift_api/library/oc_secret.py
@@ -1,54 +1,189 @@
#!/usr/bin/env python
'''
-module for openshift cloud secrets
+ OpenShiftCLI class that wraps the oc commands in a subprocess
'''
-# Examples:
-#
-# # to initiate and use /etc/origin/master/admin.kubeconfig file for auth
-# - name: list secrets
-# oc_secrets:
-# state: list
-# namespace: default
-#
-# # To get a specific secret named 'mysecret'
-# - name: list secrets
-# oc_secrets:
-# state: list
-# namespace: default
-# name: mysecret
-#
-# # To create a secret:
-# # This module expects the user to place the files on the remote server and pass them in.
-# - name: create a secret from file
-# oc_secrets:
-# state: present
-# namespace: default
-# name: mysecret
-# files:
-# - /tmp/config.yml
-# - /tmp/passwords.yml
-# delete_after: False
-
-# # To create a secret:
-# # This module expects the user to place the files on the remote server and pass them in.
-# - name: create a secret from content
-# oc_secrets:
-# state: present
-# namespace: default
-# name: mysecret
-# contents:
-# - path: /tmp/config.yml
-# content: "value=True\n"
-# - path: /tmp/passwords.yml
-# content: "test1\ntest2\ntest3\ntest4\n"
-#
-
+import atexit
+import json
import os
import shutil
-import json
-import atexit
+import subprocess
+import yaml
+
+# The base class is here to share methods.
+# Currently there is only 1 but will grow in the future.
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the oc command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+
+ def oc_cmd(self, cmd, output=False):
+ '''Base command for oc '''
+ #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
+ cmds = ['/usr/bin/oc']
+ cmds.extend(cmd)
+
+ results = ''
-class OpenShiftOC(object):
+ if self.verbose:
+ print ' '.join(cmds)
+
+ proc = subprocess.Popen(cmds,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+ proc.wait()
+ if proc.returncode == 0:
+ if output:
+ try:
+ results = json.loads(proc.stdout.read())
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ results = err.message
+
+ if self.verbose:
+ print proc.stderr.read()
+ print results
+ print
+
+ return {"returncode": proc.returncode, "results": results}
+
+ return {"returncode": proc.returncode,
+ "stderr": proc.stderr.read(),
+ "stdout": proc.stdout.read(),
+ "results": {}
+ }
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_files_from_contents(data):
+ '''Turn an array of dict: filename, content into a files array'''
+ files = []
+
+ for sfile in data:
+ path = os.path.join('/tmp', sfile['path'])
+ with open(path, 'w') as fds:
+ fds.write(sfile['content'])
+ files.append(path)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, files)
+ return files
+
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ #print "%s == %s" % (result['metadata']['name'], name)
+ if result.has_key('metadata') and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace']
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if not isinstance(user_def[key], list):
+ return False
+
+ # lists should be identical
+ if value != user_def[key]:
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print "dict returned false not instance of dict"
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print api_values
+ print user_values
+ print "keys are not equal in dict"
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value)
+ if not result:
+ if debug:
+ print "dict returned false"
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if not user_def.has_key(key) or value != user_def[key]:
+ if debug:
+ print "value not equal; user_def does not have key"
+ print value
+ print user_def[key]
+ return False
+
+ return True
+
+class Secret(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
def __init__(self,
@@ -57,10 +192,11 @@ class OpenShiftOC(object):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
+ super(Secret, OpenShiftCLI).__init__(namespace, kubeconfig)
self.namespace = namespace
self.name = secret_name
- self.verbose = verbose
self.kubeconfig = kubeconfig
+ self.verbose = verbose
def get_secrets(self):
'''return a secret by name '''
@@ -82,27 +218,17 @@ class OpenShiftOC(object):
'''return all pods '''
return self.oc_cmd(['delete', 'secrets', self.name, '-n', self.namespace])
- def secret_new(self, files):
+ def secret_new(self, files=None, contents=None):
'''Create a secret with all pods '''
+ if not files:
+ files = Utils.create_files_from_contents(contents)
+
secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
cmd.extend(secrets)
return self.oc_cmd(cmd)
- @staticmethod
- def create_files_from_contents(data):
- '''Turn an array of dict: filename, content into a files array'''
- files = []
- for sfile in data:
- with open(sfile['path'], 'w') as fds:
- fds.write(sfile['content'])
- files.append(sfile['path'])
-
- # Register cleanup when module is done
- atexit.register(OpenShiftOC.cleanup, files)
- return files
-
def update_secret(self, files, force=False):
'''run update secret
@@ -113,7 +239,7 @@ class OpenShiftOC(object):
if secret['returncode'] != 0:
return secret
- sfile_path = '/tmp/%s' % secret['results']['metadata']['name']
+ sfile_path = '/tmp/%s' % self.name
with open(sfile_path, 'w') as sfd:
sfd.write(json.dumps(secret['results']))
@@ -121,144 +247,26 @@ class OpenShiftOC(object):
if force:
cmd = ['replace', '--force', '-f', sfile_path]
- atexit.register(OpenShiftOC.cleanup, [sfile_path])
+ atexit.register(Utils.cleanup, [sfile_path])
return self.oc_cmd(cmd)
- def prep_secret(self, files):
+ def prep_secret(self, files=None, contents=None):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
+ if not files:
+ files = Utils.create_files_from_contents(contents)
+
secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
cmd.extend(secrets)
return self.oc_cmd(cmd, output=True)
- def oc_cmd(self, cmd, output=False):
- '''Base command for oc '''
- cmds = ['/usr/bin/oc']
- cmds.extend(cmd)
-
- results = ''
-
- if self.verbose:
- print ' '.join(cmds)
-
- proc = subprocess.Popen(cmds,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
- proc.wait()
- if proc.returncode == 0:
- if output:
- try:
- results = json.loads(proc.stdout.read())
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- results = err.message
-
- if self.verbose:
- print proc.stderr.read()
- print results
- print
-
- return {"returncode": proc.returncode, "results": results}
-
- return {"returncode": proc.returncode,
- "stderr": proc.stderr.read(),
- "stdout": proc.stdout.read(),
- "results": {}
- }
-
- @staticmethod
- def cleanup(files):
- '''Clean up on exit '''
- for sfile in files:
- if os.path.exists(sfile):
- if os.path.isdir(sfile):
- shutil.rmtree(sfile)
- elif os.path.isfile(sfile):
- os.remove(sfile)
-
-
-def exists(results, _name):
- ''' Check to see if the results include the name '''
- if not results:
- return False
-
- if find_result(results, _name):
- return True
-
- return False
-
-def find_result(results, _name):
- ''' Find the specified result by name'''
- rval = None
- for result in results:
- #print "%s == %s" % (result['metadata']['name'], name)
- if result.has_key('metadata') and result['metadata']['name'] == _name:
- rval = result
- break
-
- return rval
-
-# Disabling too-many-branches. This is a yaml dictionary comparison function
-# pylint: disable=too-many-branches,too-many-return-statements
-def check_def_equal(user_def, result_def, debug=False):
- ''' Given a user defined definition, compare it with the results given back by our query. '''
-
- # Currently these values are autogenerated and we do not need to check them
- skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace']
-
- for key, value in result_def.items():
- if key in skip:
- continue
-
- # Both are lists
- if isinstance(value, list):
- if not isinstance(user_def[key], list):
- return False
-
- # lists should be identical
- if value != user_def[key]:
- return False
-
- # recurse on a dictionary
- elif isinstance(value, dict):
- if not isinstance(user_def[key], dict):
- if debug:
- print "dict returned false not instance of dict"
- return False
-
- # before passing ensure keys match
- api_values = set(value.keys()) - set(skip)
- user_values = set(user_def[key].keys()) - set(skip)
- if api_values != user_values:
- if debug:
- print api_values
- print user_values
- print "keys are not equal in dict"
- return False
-
- result = check_def_equal(user_def[key], value)
- if not result:
- if debug:
- print "dict returned false"
- return False
-
- # Verify each key, value pair is the same
- else:
- if not user_def.has_key(key) or value != user_def[key]:
- if debug:
- print "value not equal; user_def does not have key"
- print value
- print user_def[key]
- return False
-
- return True
+# pylint: disable=too-many-branches
def main():
'''
ansible oc module for secrets
@@ -281,10 +289,10 @@ def main():
supports_check_mode=True,
)
- occmd = OpenShiftOC(module.params['namespace'],
- module.params['name'],
- kubeconfig=module.params['kubeconfig'],
- verbose=module.params['debug'])
+ occmd = Secret(module.params['namespace'],
+ module.params['name'],
+ kubeconfig=module.params['kubeconfig'],
+ verbose=module.params['debug'])
state = module.params['state']
@@ -302,7 +310,7 @@ def main():
# Delete
########
if state == 'absent':
- if not exists(api_rval['results'], module.params['name']):
+ if not Utils.exists(api_rval['results'], module.params['name']):
module.exit_json(changed=False, state="absent")
if module.check_mode:
@@ -316,39 +324,39 @@ def main():
if module.params['files']:
files = module.params['files']
elif module.params['contents']:
- files = OpenShiftOC.create_files_from_contents(module.params['contents'])
+ files = Utils.create_files_from_contents(module.params['contents'])
else:
module.fail_json(msg='Either specify files or contents.')
########
# Create
########
- if not exists(api_rval['results'], module.params['name']):
+ if not Utils.exists(api_rval['results'], module.params['name']):
if module.check_mode:
module.exit_json(change=False, msg='Would have performed a create.')
- api_rval = occmd.secret_new(files)
+ api_rval = occmd.secret_new(module.params['files'], module.params['contents'])
# Remove files
if files and module.params['delete_after']:
- OpenShiftOC.cleanup(files)
+ Utils.cleanup(files)
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
- secret = occmd.prep_secret(files)
+ secret = occmd.prep_secret(module.params['files'], module.params['contents'])
if secret['returncode'] != 0:
module.fail_json(msg=secret)
- if check_def_equal(secret['results'], api_rval['results'][0]):
+ if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
# Remove files
if files and module.params['delete_after']:
- OpenShiftOC.cleanup(files)
+ Utils.cleanup(files)
module.exit_json(changed=False, results=secret['results'], state="present")
@@ -358,8 +366,8 @@ def main():
api_rval = occmd.update_secret(files, force=module.params['force'])
# Remove files
- if files and module.params['delete_after']:
- OpenShiftOC.cleanup(files)
+ if secret and module.params['delete_after']:
+ Utils.cleanup(files)
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
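
The module is now named oc_secret and delegates its plumbing to the shared OpenShiftCLI and Utils classes. A usage sketch, adapted from the examples that were removed from the old oc_secrets docstring, is shown below; the namespace, secret name, and contents are placeholders, and either files (paths already on the remote host) or contents may be supplied.

    # Hedged playbook sketch for the renamed oc_secret module:
    - name: create a secret from inline contents
      oc_secret:
        state: present
        namespace: default
        name: mysecret
        contents:
        - path: config.yml
          content: "value=True\n"
        - path: passwords.yml
          content: "test1\ntest2\n"
        delete_after: True
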
diff --git a/roles/lib_openshift_api/library/oc_service.py b/roles/lib_openshift_api/library/oc_service.py
new file mode 100644
index 000000000..48281f254
--- /dev/null
+++ b/roles/lib_openshift_api/library/oc_service.py
@@ -0,0 +1,356 @@
+#!/usr/bin/env python
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+import atexit
+import json
+import os
+import shutil
+import subprocess
+import yaml
+
+# The base class is here to share methods.
+# Currently there is only 1 but will grow in the future.
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the oc command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+
+ def oc_cmd(self, cmd, output=False):
+ '''Base command for oc '''
+ #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
+ cmds = ['/usr/bin/oc']
+ cmds.extend(cmd)
+
+ results = ''
+
+ if self.verbose:
+ print ' '.join(cmds)
+
+ proc = subprocess.Popen(cmds,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+ proc.wait()
+ if proc.returncode == 0:
+ if output:
+ try:
+ results = json.loads(proc.stdout.read())
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ results = err.message
+
+ if self.verbose:
+ print proc.stderr.read()
+ print results
+ print
+
+ return {"returncode": proc.returncode, "results": results}
+
+ return {"returncode": proc.returncode,
+ "stderr": proc.stderr.read(),
+ "stdout": proc.stdout.read(),
+ "results": {}
+ }
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_files_from_contents(data):
+ '''Turn an array of dict: filename, content into a files array'''
+ files = []
+
+ for sfile in data:
+ path = os.path.join('/tmp', sfile['path'])
+ with open(path, 'w') as fds:
+ fds.write(sfile['content'])
+ files.append(path)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, files)
+ return files
+
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ #print "%s == %s" % (result['metadata']['name'], name)
+ if result.has_key('metadata') and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace']
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if not isinstance(user_def[key], list):
+ return False
+
+ # lists should be identical
+ if value != user_def[key]:
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print "dict returned false not instance of dict"
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print api_values
+ print user_values
+ print "keys are not equal in dict"
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value)
+ if not result:
+ if debug:
+ print "dict returned false"
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if not user_def.has_key(key) or value != user_def[key]:
+ if debug:
+ print "value not equal; user_def does not have key"
+ print value
+ print user_def[key]
+ return False
+
+ return True
+
+class Service(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ def __init__(self,
+ namespace,
+ service_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(Service, OpenShiftCLI).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.name = service_name
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+
+ def create_service(self, sfile):
+ ''' create the service '''
+ return self.oc_cmd(['create', '-f', sfile])
+
+ def get_services(self):
+ '''return a secret by name '''
+ cmd = ['get', 'services', '-o', 'json', '-n', self.namespace]
+ if self.name:
+ cmd.append(self.name)
+
+ rval = self.oc_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if rval.has_key('items'):
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def delete_service(self):
+ '''return all pods '''
+ return self.oc_cmd(['delete', 'service', self.name, '-n', self.namespace])
+
+ def update_service(self, sfile, force=False):
+ '''run update service
+
+ This receives a list of file names and converts it into a secret.
+ The secret is then written to disk and passed into the `oc replace` command.
+ '''
+
+ cmd = ['replace', '-f', sfile]
+ if force:
+ cmd = ['replace', '--force', '-f', sfile]
+
+ atexit.register(Utils.cleanup, [sfile])
+
+ return self.oc_cmd(cmd)
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for services
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, type='str'),
+ service_file=dict(default=None, type='str'),
+ service_file_type=dict(default=None, type='str'),
+ delete_after=dict(default=False, type='bool'),
+ contents=dict(default=None, type='list'),
+ force=dict(default=False, type='bool'),
+ ),
+ mutually_exclusive=[["contents", "service_file"]],
+
+ supports_check_mode=True,
+ )
+ occmd = Service(module.params['namespace'],
+ module.params['name'],
+ kubeconfig=module.params['kubeconfig'],
+ verbose=module.params['debug'])
+
+ state = module.params['state']
+
+ api_rval = occmd.get_services()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ module.exit_json(changed=False, results=api_rval['results'], state="list")
+
+ if not module.params['name']:
+ module.fail_json(msg='Please specify a name when state is absent|present.')
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], module.params['name']):
+ module.exit_json(changed=False, state="absent")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a delete.')
+
+ api_rval = occmd.delete_service()
+ module.exit_json(changed=True, results=api_rval, state="absent")
+
+
+ if state == 'present':
+ if module.params['service_file']:
+ sfile = module.params['service_file']
+ elif module.params['contents']:
+ sfile = Utils.create_files_from_contents(module.params['contents'])
+ else:
+ module.fail_json(msg='Either specify files or contents.')
+
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], module.params['name']):
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a create.')
+
+ api_rval = occmd.create_service(sfile)
+
+ # Remove files
+ if sfile and module.params['delete_after']:
+ Utils.cleanup([sfile])
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ ########
+ # Update
+ ########
+ sfile_contents = Utils.get_resource_file(sfile, module.params['service_file_type'])
+ if Utils.check_def_equal(sfile_contents, api_rval['results'][0]):
+
+ # Remove files
+ if module.params['service_file'] and module.params['delete_after']:
+ Utils.cleanup([sfile])
+
+ module.exit_json(changed=False, results=api_rval['results'][0], state="present")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed an update.')
+
+ api_rval = occmd.update_service(sfile, force=module.params['force'])
+
+ # Remove files
+ if sfile and module.params['delete_after']:
+ Utils.cleanup([sfile])
+
+ if api_rval['returncode'] != 0:
+ module.fail_json(msg=api_rval)
+
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. This are required
+from ansible.module_utils.basic import *
+
+main()
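
oc_service follows the same pattern as oc_secret: it lists, creates, replaces, or deletes a service and uses Utils.check_def_equal to decide whether an update is needed. Below is a usage sketch based on the module's argument_spec; the service name and file path are placeholders.

    # Hedged playbook sketch for the new oc_service module:
    - name: ensure my service definition is applied
      oc_service:
        state: present
        namespace: default
        name: myservice
        service_file: /tmp/myservice.yml
        service_file_type: yaml
        delete_after: False
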
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 263daf210..30e29787a 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1407,6 +1407,7 @@ class OpenShiftFacts(object):
if 'node' in roles:
defaults['node'] = dict(labels={}, annotations={},
iptables_sync_period='5s',
+ local_quota_per_fsgroup="",
set_node_ip=False)
if 'docker' in roles:
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 4b5832ab7..ca1e26459 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -31,6 +31,7 @@
node_image: "{{ osn_image | default(None) }}"
ovs_image: "{{ osn_ovs_image | default(None) }}"
proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
+ local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 67975d372..28cb1ea26 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -38,3 +38,6 @@ volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
+volumeConfig:
+ localQuota:
+ perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
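
Setting openshift_node_local_quota_per_fsgroup in the inventory now flows through the local_quota_per_fsgroup node fact into node-config.yaml via the template above. A sketch with an assumed quota value:

    # Inventory / group_vars (the 512Mi value is only an example):
    openshift_node_local_quota_per_fsgroup: 512Mi

    # Resulting section rendered into node-config.yaml:
    volumeConfig:
      localQuota:
        perFSGroup: 512Mi
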
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 53b1d6230..65d2291bb 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -12,7 +12,7 @@ Wants={{ openshift.common.service_type }}-master.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index f34fa7b74..5dd28d52a 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -9,7 +9,8 @@
- name: create the service account
shell: >
echo {{ lookup('template', '../templates/serviceaccount.j2')
- | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }} create -f -
+ | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }}
+ -n {{ openshift_serviceaccounts_namespace }} create -f -
when: item.1.rc != 0
with_together:
- openshift_serviceaccounts_names