Diffstat (limited to 'inventory')
 -rw-r--r--  inventory/byo/hosts                          |   6
 -rwxr-xr-x  inventory/multi_ec2.py                       | 185
 -rw-r--r--  inventory/multi_ec2.yaml.example             |  19
 -rw-r--r--  inventory/openshift-ansible-inventory.spec   |  29
 4 files changed, 190 insertions, 49 deletions
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
index 98dbb4fd8..728eec8aa 100644
--- a/inventory/byo/hosts
+++ b/inventory/byo/hosts
@@ -20,7 +20,8 @@ deployment_type=enterprise
 openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
 
 # Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
 
 # Origin copr repo
 #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
@@ -31,4 +32,5 @@ ose3-master-ansible.test.example.com
 
 # host group for nodes
 [nodes]
-ose3-node[1:2]-ansible.test.example.com
+ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index b839a33ea..f8196aefd 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -11,9 +11,13 @@ import yaml
 import os
 import subprocess
 import json
-
+import errno
+import fcntl
+import tempfile
+import copy
 
 CONFIG_FILE_NAME = 'multi_ec2.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
 
 class MultiEc2(object):
     '''
@@ -22,12 +26,17 @@ class MultiEc2(object):
             Stores a json hash of resources in result.
     '''
-    def __init__(self):
-        self.args = None
+    def __init__(self, args=None):
+        # Allow args to be passed when called as a library
+        if not args:
+            self.args = {}
+        else:
+            self.args = args
+
+        self.cache_path = DEFAULT_CACHE_PATH
         self.config = None
         self.all_ec2_results = {}
         self.result = {}
-        self.cache_path = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
 
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
         same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
@@ -41,17 +50,26 @@ class MultiEc2(object):
         else:
             self.config_file = None # expect env vars
 
-        self.parse_cli_args()
+    def run(self):
+        '''This method checks to see if the local
+           cache is valid for the inventory.
+
+           if the cache is valid; return cache
+           else the credentials are loaded from multi_ec2.yaml or from the env
+           and we attempt to get the inventory from the provider specified.
+        '''
 
         # load yaml
         if self.config_file and os.path.isfile(self.config_file):
             self.config = self.load_yaml_config()
         elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
              os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+            # Build a default config
            self.config = {}
             self.config['accounts'] = [
                 {
                     'name': 'default',
+                    'cache_location': DEFAULT_CACHE_PATH,
                     'provider': 'aws/hosts/ec2.py',
                     'env_vars': {
                         'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
@@ -64,11 +82,15 @@ class MultiEc2(object):
         else:
             raise RuntimeError("Could not find valid ec2 credentials in the environment.")
 
-        if self.args.refresh_cache:
+        # Set the default cache path but if its defined we'll assign it.
+        if self.config.has_key('cache_location'):
+            self.cache_path = self.config['cache_location']
+
+        if self.args.get('refresh_cache', None):
             self.get_inventory()
             self.write_to_cache()
         # if its a host query, fetch and do not cache
-        elif self.args.host:
+        elif self.args.get('host', None):
             self.get_inventory()
         elif not self.is_cache_valid():
             # go fetch the inventories and cache them if cache is expired
@@ -109,9 +131,9 @@ class MultiEc2(object):
                 "and that it is executable. (%s)" % provider)
 
         cmds = [provider]
-        if self.args.host:
+        if self.args.get('host', None):
             cmds.append("--host")
-            cmds.append(self.args.host)
+            cmds.append(self.args.get('host', None))
         else:
             cmds.append('--list')
 
@@ -119,6 +141,54 @@
         return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
                                 stdout=subprocess.PIPE, env=env)
 
+    @staticmethod
+    def generate_config(config_data):
+        """Generate the ec2.ini file in as a secure temp file.
+           Once generated, pass it to the ec2.py as an environment variable.
+        """
+        fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
+        for section, values in config_data.items():
+            os.write(fildes, "[%s]\n" % section)
+            for option, value in values.items():
+                os.write(fildes, "%s = %s\n" % (option, value))
+        os.close(fildes)
+        return tmp_file_path
+
+    def run_provider(self):
+        '''Setup the provider call with proper variables
+           and call self.get_provider_tags.
+        '''
+        try:
+            all_results = []
+            tmp_file_paths = []
+            processes = {}
+            for account in self.config['accounts']:
+                env = account['env_vars']
+                if account.has_key('provider_config'):
+                    tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
+                    env['EC2_INI_PATH'] = tmp_file_paths[-1]
+                name = account['name']
+                provider = account['provider']
+                processes[name] = self.get_provider_tags(provider, env)
+
+            # for each process collect stdout when its available
+            for name, process in processes.items():
+                out, err = process.communicate()
+                all_results.append({
+                    "name": name,
+                    "out": out.strip(),
+                    "err": err.strip(),
+                    "code": process.returncode
+                })
+
+        finally:
+            # Clean up the mkstemp file
+            for tmp_file in tmp_file_paths:
+                os.unlink(tmp_file)
+
+        return all_results
+
     def get_inventory(self):
         """Create the subprocess to fetch tags from a provider.
         Host query:
@@ -129,46 +199,61 @@
         Query all of the different accounts for their tags.
         Once completed store all of their results into one merged updated hash.
         """
-        processes = {}
-        for account in self.config['accounts']:
-            env = account['env_vars']
-            name = account['name']
-            provider = account['provider']
-            processes[name] = self.get_provider_tags(provider, env)
-
-        # for each process collect stdout when its available
-        all_results = []
-        for name, process in processes.items():
-            out, err = process.communicate()
-            all_results.append({
-                "name": name,
-                "out": out.strip(),
-                "err": err.strip(),
-                "code": process.returncode
-            })
+        provider_results = self.run_provider()
 
         # process --host results
-        if not self.args.host:
+        # For any 0 result, return it
+        if self.args.get('host', None):
+            count = 0
+            for results in provider_results:
+                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
+                    self.result = json.loads(results['out'])
+                    count += 1
+            if count > 1:
+                raise RuntimeError("Found > 1 results for --host %s. \
+                                   This is an invalid state." % self.args.get('host', None))
+        # process --list results
+        else:
             # For any non-zero, raise an error on it
-            for result in all_results:
+            for result in provider_results:
                 if result['code'] != 0:
                     raise RuntimeError(result['err'])
                 else:
                     self.all_ec2_results[result['name']] = json.loads(result['out'])
+
+            # Check if user wants extra vars in yaml by
+            # having hostvars and all_group defined
+            for acc_config in self.config['accounts']:
+                self.apply_account_config(acc_config)
+
+            # Build results by merging all dictionaries
             values = self.all_ec2_results.values()
             values.insert(0, self.result)
             for result in values:
                 MultiEc2.merge_destructively(self.result, result)
-        else:
-            # For any 0 result, return it
-            count = 0
-            for results in all_results:
-                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
-                    self.result = json.loads(out)
-                    count += 1
-            if count > 1:
-                raise RuntimeError("Found > 1 results for --host %s. \
-                    This is an invalid state." % self.args.host)
+
+    def apply_account_config(self, acc_config):
+        ''' Apply account config settings
+        '''
+        if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
+            return
+
+        results = self.all_ec2_results[acc_config['name']]
+        # Update each hostvar with the newly desired key: value
+        for host_property, value in acc_config['hostvars'].items():
+            # Verify the account results look sane
+            # by checking for these keys ('_meta' and 'hostvars' exist)
+            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
+                for data in results['_meta']['hostvars'].values():
+                    data[str(host_property)] = str(value)
+
+            # Add this group
+            results["%s_%s" % (host_property, value)] = \
+                copy.copy(results[acc_config['all_group']])
+
+        # store the results back into all_ec2_results
+        self.all_ec2_results[acc_config['name']] = results
+
     @staticmethod
     def merge_destructively(input_a, input_b):
         "merges b into input_a"
@@ -182,7 +267,7 @@ class MultiEc2(object):
             elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
                 for result in input_b[key]:
                     if result not in input_a[key]:
-                        input_a[key].input_append(result)
+                        input_a[key].append(result)
             # a is a list and not b
             elif isinstance(input_a[key], list):
                 if input_b[key] not in input_a[key]:
@@ -217,14 +302,27 @@ class MultiEc2(object):
                             help='List instances (default: True)')
         parser.add_argument('--host', action='store', default=False,
                             help='Get all the variables about a specific instance')
-        self.args = parser.parse_args()
+        self.args = parser.parse_args().__dict__
 
     def write_to_cache(self):
         ''' Writes data in JSON format to a file '''
+        # if it does not exist, try and create it.
+        if not os.path.isfile(self.cache_path):
+            path = os.path.dirname(self.cache_path)
+            try:
+                os.makedirs(path)
+            except OSError as exc:
+                if exc.errno != errno.EEXIST or not os.path.isdir(path):
+                    raise
+
        json_data = MultiEc2.json_format_dict(self.result, True)
         with open(self.cache_path, 'w') as cache:
-            cache.write(json_data)
+            try:
+                fcntl.flock(cache, fcntl.LOCK_EX)
+                cache.write(json_data)
+            finally:
+                fcntl.flock(cache, fcntl.LOCK_UN)
 
     def get_inventory_from_cache(self):
         ''' Reads the inventory from the cache file and returns it as a JSON
@@ -254,4 +352,7 @@
 
 
 if __name__ == "__main__":
-    print MultiEc2().result_str()
+    MEC2 = MultiEc2()
+    MEC2.parse_cli_args()
+    MEC2.run()
+    print MEC2.result_str()
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index 91e7c7970..99f157b11 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -1,15 +1,32 @@
 # multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
+
 accounts:
   - name: aws1
     provider: aws/hosts/ec2.py
+    provider_config:
+      ec2:
+        regions: all
+        regions_exclude: us-gov-west-1,cn-north-1
+        destination_variable: public_dns_name
+        route53: False
+        cache_path: ~/.ansible/tmp
+        cache_max_age: 300
+        vpc_destination_variable: ip_address
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+    all_group: ec2
+    hostvars:
+      cloud: aws
+      account: aws1
 
-  - name: aws2
+- name: aws2
   provider: aws/hosts/ec2.py
   env_vars:
     AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
     AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+    EC2_INI_PATH: /etc/ansible/ec2.ini
 
 cache_max_age: 60
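Editor's note: the provider_config block in the aws1 example above is what run_provider() hands to the new generate_config() helper, which writes an ec2.ini-style temp file and points ec2.py at it via EC2_INI_PATH. A small illustrative sketch, with plain prints standing in for the os.write calls and a toy dict copied from the example:

    # Sketch only: mirrors the "[section]" / "option = value" layout that
    # generate_config() writes to its tempfile.mkstemp file.
    provider_config = {
        'ec2': {
            'regions': 'all',
            'regions_exclude': 'us-gov-west-1,cn-north-1',
            'destination_variable': 'public_dns_name',
            'route53': False,
            'cache_path': '~/.ansible/tmp',
            'cache_max_age': 300,
            'vpc_destination_variable': 'ip_address',
        }
    }

    for section, values in provider_config.items():
        print("[%s]" % section)
        for option, value in values.items():
            print("%s = %s" % (option, value))
    # Emits an ec2.ini-style block, e.g.:
    #   [ec2]
    #   regions = all
    #   regions_exclude = us-gov-west-1,cn-north-1
    #   ...

The aws2 account shows the alternative: no provider_config, just a fixed EC2_INI_PATH exported through env_vars.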
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index 8267e16f6..cd2332549 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -1,6 +1,6 @@
 Summary: OpenShift Ansible Inventories
 Name: openshift-ansible-inventory
-Version: 0.0.2
+Version: 0.0.7
 Release: 1%{?dist}
 License: ASL 2.0
 URL: https://github.com/openshift/openshift-ansible
@@ -25,18 +25,39 @@
 mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
 
 cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
 cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/ec2.py aws/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/gce.py %{buildroot}/usr/share/ansible/inventory/gce
+cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
+cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 
 %files
 %config(noreplace) /etc/ansible/*
 %dir /usr/share/ansible/inventory
 /usr/share/ansible/inventory/multi_ec2.py*
 /usr/share/ansible/inventory/aws/ec2.py*
-%config(noreplace) /usr/share/ansible/inventory/aws/ec2.ini
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
+* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
+- Making multi_ec2 into a library (kwoodson@redhat.com)
+
+* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- Added support for grouping and a bug fix. (kwoodson@redhat.com)
+
+* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
+- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
+  not dictating what the ec2.ini file should look like. (twiest@redhat.com)
+- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
+
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
+- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
+
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
+- fixed build problems with openshift-ansible-inventory.spec
+  (twiest@redhat.com)
+- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
+- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
+- Adding refresh-cache option and cleanup for pylint. Also updated for
+  aws/hosts/ being added. (kwoodson@redhat.com)
+
 * Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
 - added the ability to have a config file in /etc/openshift_ansible to
   multi_ec2.py. (twiest@redhat.com)
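Editor's note: as a closing illustration, a self-contained sketch (toy data, not real ec2.py output) of what the new all_group/hostvars settings handled by apply_account_config() do to one account's inventory: every host under _meta.hostvars gains the configured key/value pairs, and a <key>_<value> group is added as a copy of the group named by all_group.

    import copy

    # Toy stand-in for one account's ec2.py --list output.
    results = {
        '_meta': {'hostvars': {'host1': {}, 'host2': {}}},
        'ec2': ['host1', 'host2'],      # the group named by all_group
    }
    acc_config = {'all_group': 'ec2',
                  'hostvars': {'cloud': 'aws', 'account': 'aws1'}}

    # Same transformation the diff adds in apply_account_config()
    # (sanity checks omitted for brevity).
    for host_property, value in acc_config['hostvars'].items():
        for data in results['_meta']['hostvars'].values():
            data[str(host_property)] = str(value)
        results["%s_%s" % (host_property, value)] = \
            copy.copy(results[acc_config['all_group']])

    # results now also contains 'cloud_aws' and 'account_aws1' groups, and
    # each host's hostvars include cloud=aws and account=aws1.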