Diffstat (limited to 'inventory')
-rw-r--r--  inventory/aws/hosts/ec2.ini                                         |  97
-rwxr-xr-x  inventory/aws/hosts/ec2.py                                          | 645
-rw-r--r--  inventory/byo/hosts.aep.example                                     | 286
-rw-r--r--  inventory/byo/hosts.aep_quickstart                                  |  20
-rw-r--r--  inventory/byo/hosts.example                                         |  87
-rw-r--r--  inventory/byo/hosts.openstack                                       |  37
-rw-r--r--  inventory/byo/hosts.origin.example                                  | 291
-rw-r--r--  inventory/byo/hosts.ose.example                                     | 287
-rwxr-xr-x  inventory/gce/hosts/gce.py                                          |  27
-rw-r--r--  inventory/multi_ec2.yaml.example                                    |  32
-rwxr-xr-x  inventory/multi_inventory.py (renamed from inventory/multi_ec2.py)  | 180
-rw-r--r--  inventory/multi_inventory.yaml.example                              |  51
-rw-r--r--  inventory/openshift-ansible-inventory.spec                          | 108
-rwxr-xr-x  inventory/openstack/hosts/nova.py                                   |   2
14 files changed, 1791 insertions, 359 deletions
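Most of the change below is in the EC2 dynamic inventory: ec2.py gains boto profile support, ElastiCache clusters/nodes/replication groups, configurable instance states, and Python 3 compatibility via six, with ec2.ini documenting the matching options. Two small helpers added near the end of the ec2.py diff, uncammelize and to_safe, control how AWS describe-call keys and group names are flattened; here is a minimal standalone sketch of that behavior, with the methods lifted out of the class and the sample inputs purely illustrative:

```python
import re


def uncammelize(key):
    """CamelCase -> snake_case, as in the helper added to ec2.py."""
    temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()


def to_safe(word, replace_dash_in_groups=True):
    """Turn characters Ansible group names can't use into underscores."""
    regex = "[^A-Za-z0-9_"
    if not replace_dash_in_groups:
        regex += r"\-"  # keep dashes when the option is disabled
    return re.sub(regex + "]", "_", word)


# Illustrative inputs, not taken from a real inventory
print(to_safe("ec2_" + uncammelize("CacheClusterId")))      # ec2_cache_cluster_id
print(to_safe("tag_env=staging-us-east-1"))                 # tag_env_staging_us_east_1
print(to_safe("tag_env=staging-us-east-1", False))          # tag_env_staging-us-east-1
```

In the script itself replace_dash_in_groups defaults to True when the option is absent, while the shipped ec2.ini example sets it to False, which keeps dashes in group names.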
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini index eaab0a410..aa0f9090f 100644 --- a/inventory/aws/hosts/ec2.ini +++ b/inventory/aws/hosts/ec2.ini @@ -24,24 +24,61 @@ regions_exclude = us-gov-west-1,cn-north-1 # This is the normal destination variable to use. If you are running Ansible # from outside EC2, then 'public_dns_name' makes the most sense. If you are # running Ansible from within EC2, then perhaps you want to use the internal -# address, and should set this to 'private_dns_name'. +# address, and should set this to 'private_dns_name'. The key of an EC2 tag +# may optionally be used; however the boto instance variables hold precedence +# in the event of a collision. destination_variable = public_dns_name # For server inside a VPC, using DNS names may not make sense. When an instance # has 'subnet_id' set, this variable is used. If the subnet is public, setting # this to 'ip_address' will return the public IP address. For instances in a # private subnet, this should be set to 'private_ip_address', and Ansible must -# be run from with EC2. +# be run from within EC2. The key of an EC2 tag may optionally be used; however +# the boto instance variables hold precedence in the event of a collision. +# WARNING: - instances that are in the private vpc, _without_ public ip address +# will not be listed in the inventory until You set: +# vpc_destination_variable = 'private_ip_address' vpc_destination_variable = ip_address # To tag instances on EC2 with the resource records that point to them from # Route53, uncomment and set 'route53' to True. route53 = False +# To exclude RDS instances from the inventory, uncomment and set to False. +rds = False + +# To exclude ElastiCache instances from the inventory, uncomment and set to False. +elasticache = False + # Additionally, you can specify the list of zones to exclude looking up in # 'route53_excluded_zones' as a comma-separated list. # route53_excluded_zones = samplezone1.com, samplezone2.com +# By default, only EC2 instances in the 'running' state are returned. Set +# 'all_instances' to True to return all instances regardless of state. +all_instances = False + +# By default, only EC2 instances in the 'running' state are returned. Specify +# EC2 instance states to return as a comma-separated list. This +# option is overriden when 'all_instances' is True. +# instance_states = pending, running, shutting-down, terminated, stopping, stopped + +# By default, only RDS instances in the 'available' state are returned. Set +# 'all_rds_instances' to True return all RDS instances regardless of state. +all_rds_instances = False + +# By default, only ElastiCache clusters and nodes in the 'available' state +# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' +# to True return all ElastiCache clusters and nodes, regardless of state. +# +# Note that all_elasticache_nodes only applies to listed clusters. That means +# if you set all_elastic_clusters to false, no node will be return from +# unavailable clusters, regardless of the state and to what you set for +# all_elasticache_nodes. +all_elasticache_replication_groups = False +all_elasticache_clusters = False +all_elasticache_nodes = False + # API calls to EC2 are slow. For this reason, we cache the results of an API # call. Set this to the path you want cache files to be written to. Two files # will be written to this directory: @@ -60,3 +97,59 @@ cache_max_age = 300 # destination_variable and vpc_destination_variable. 
# destination_format = {0}.{1}.rhcloud.com # destination_format_tags = Name,environment + +# Organize groups into a nested/hierarchy instead of a flat namespace. +nested_groups = False + +# Replace - tags when creating groups to avoid issues with ansible +replace_dash_in_groups = False + +# The EC2 inventory output can become very large. To manage its size, +# configure which groups should be created. +group_by_instance_id = True +group_by_region = True +group_by_availability_zone = True +group_by_ami_id = True +group_by_instance_type = True +group_by_key_pair = True +group_by_vpc_id = True +group_by_security_group = True +group_by_tag_keys = True +group_by_tag_none = True +group_by_route53_names = True +group_by_rds_engine = True +group_by_rds_parameter_group = True +group_by_elasticache_engine = True +group_by_elasticache_cluster = True +group_by_elasticache_parameter_group = True +group_by_elasticache_replication_group = True + +# If you only want to include hosts that match a certain regular expression +# pattern_include = staging-* + +# If you want to exclude any hosts that match a certain regular expression +# pattern_exclude = staging-* + +# Instance filters can be used to control which instances are retrieved for +# inventory. For the full list of possible filters, please read the EC2 API +# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters +# Filters are key/value pairs separated by '=', to list multiple filters use +# a list separated by commas. See examples below. + +# Retrieve only instances with (key=value) env=staging tag +# instance_filters = tag:env=staging + +# Retrieve only instances with role=webservers OR role=dbservers tag +# instance_filters = tag:role=webservers,tag:role=dbservers + +# Retrieve only t1.micro instances OR instances with tag env=staging +# instance_filters = instance-type=t1.micro,tag:env=staging + +# You can use wildcards in filter values also. Below will list instances which +# tag Name value matches webservers1* +# (ex. webservers15, webservers1a, webservers123 etc) +# instance_filters = tag:Name=webservers1* + +# A boto configuration profile may be used to separate out credentials +# see http://boto.readthedocs.org/en/latest/boto_config_tut.html +# boto_profile = some-boto-profile-name diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py index f231ff4c2..8b878cafd 100755 --- a/inventory/aws/hosts/ec2.py +++ b/inventory/aws/hosts/ec2.py @@ -22,6 +22,12 @@ you need to define: export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus +If you're using boto profiles (requires boto>=2.24.0) you can choose a profile +using the --boto-profile command line argument (e.g. 
ec2.py --boto-profile prod) or using +the AWS_PROFILE variable: + + AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml + For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html When run against a specific host, this script returns the following variables: @@ -121,8 +127,11 @@ from time import time import boto from boto import ec2 from boto import rds +from boto import elasticache from boto import route53 -import ConfigParser +import six + +from six.moves import configparser from collections import defaultdict try: @@ -145,9 +154,18 @@ class Ec2Inventory(object): # Index of hostname (address) to instance ID self.index = {} + # Boto profile to use (if any) + self.boto_profile = None + # Read settings and parse CLI arguments - self.read_settings() self.parse_cli_args() + self.read_settings() + + # Make sure that profile_name is not passed at all if not set + # as pre 2.24 boto will fall over otherwise + if self.boto_profile: + if not hasattr(boto.ec2.EC2Connection, 'profile_name'): + self.fail_with_error("boto version must be >= 2.24 to use profile") # Cache if self.args.refresh_cache: @@ -166,7 +184,7 @@ class Ec2Inventory(object): else: data_to_print = self.json_format_dict(self.inventory, True) - print data_to_print + print(data_to_print) def is_cache_valid(self): @@ -184,10 +202,12 @@ class Ec2Inventory(object): def read_settings(self): ''' Reads the settings from the ec2.ini file ''' - - config = ConfigParser.SafeConfigParser() + if six.PY3: + config = configparser.ConfigParser() + else: + config = configparser.SafeConfigParser() ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') - ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) + ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path))) config.read(ec2_ini_path) # is eucalyptus? @@ -236,18 +256,72 @@ class Ec2Inventory(object): if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') - # Return all EC2 and RDS instances (if RDS is enabled) + # Include ElastiCache instances? + self.elasticache_enabled = True + if config.has_option('ec2', 'elasticache'): + self.elasticache_enabled = config.getboolean('ec2', 'elasticache') + + # Return all EC2 instances? if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False + + # Instance states to be gathered in inventory. Default is 'running'. + # Setting 'all_instances' to 'yes' overrides this option. + ec2_valid_instance_states = [ + 'pending', + 'running', + 'shutting-down', + 'terminated', + 'stopping', + 'stopped' + ] + self.ec2_instance_states = [] + if self.all_instances: + self.ec2_instance_states = ec2_valid_instance_states + elif config.has_option('ec2', 'instance_states'): + for instance_state in config.get('ec2', 'instance_states').split(','): + instance_state = instance_state.strip() + if instance_state not in ec2_valid_instance_states: + continue + self.ec2_instance_states.append(instance_state) + else: + self.ec2_instance_states = ['running'] + + # Return all RDS instances? (if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: self.all_rds_instances = False + # Return all ElastiCache replication groups? 
(if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: + self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') + else: + self.all_elasticache_replication_groups = False + + # Return all ElastiCache clusters? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: + self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') + else: + self.all_elasticache_clusters = False + + # Return all ElastiCache nodes? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: + self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') + else: + self.all_elasticache_nodes = False + + # boto configuration profile (prefer CLI argument) + self.boto_profile = self.args.boto_profile + if config.has_option('ec2', 'boto_profile') and not self.boto_profile: + self.boto_profile = config.get('ec2', 'boto_profile') + # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) + if self.boto_profile: + cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) if not os.path.exists(cache_dir): os.makedirs(cache_dir) @@ -261,6 +335,12 @@ class Ec2Inventory(object): else: self.nested_groups = False + # Replace dash or not in group names + if config.has_option('ec2', 'replace_dash_in_groups'): + self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') + else: + self.replace_dash_in_groups = True + # Configure which groups should be created. group_by_options = [ 'group_by_instance_id', @@ -276,6 +356,10 @@ class Ec2Inventory(object): 'group_by_route53_names', 'group_by_rds_engine', 'group_by_rds_parameter_group', + 'group_by_elasticache_engine', + 'group_by_elasticache_cluster', + 'group_by_elasticache_parameter_group', + 'group_by_elasticache_replication_group', ] for option in group_by_options: if config.has_option('ec2', option): @@ -290,7 +374,7 @@ class Ec2Inventory(object): self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None - except ConfigParser.NoOptionError, e: + except configparser.NoOptionError: self.pattern_include = None # Do we need to exclude hosts that match a pattern? @@ -300,7 +384,7 @@ class Ec2Inventory(object): self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None - except ConfigParser.NoOptionError, e: + except configparser.NoOptionError: self.pattern_exclude = None # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
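The hunks that follow add a --boto-profile CLI flag and route it through new connection helpers: profile_name is passed to connect_to_region only when a profile is actually set (pre-2.24 boto rejects the argument), the security token is copied out of the profile section to work around boto/boto#2100, and a None return from connect_to_region becomes a hard error instead of a silent failure. A simplified standalone sketch of that flow, assuming boto is installed and with the script's error reporting reduced to a plain exception:

```python
import boto
from boto import ec2


def connect_to_aws(module, region, boto_profile=None):
    """Open a connection to `region`, passing profile_name only when set."""
    connect_args = {}
    if boto_profile:
        connect_args['profile_name'] = boto_profile
        # Work around boto/boto#2100: a security token stored under the
        # profile section is not picked up automatically.
        profile = 'profile ' + boto_profile
        if boto.config.has_option(profile, 'aws_security_token'):
            connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
    conn = module.connect_to_region(region, **connect_args)
    if conn is None:
        # connect_to_region fails "silently" for unknown or unsupported regions
        raise RuntimeError("connection to region %s failed" % region)
    return conn


# e.g. ec2_conn = connect_to_aws(ec2, 'us-east-1', boto_profile='prod')
```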
@@ -325,6 +409,8 @@ class Ec2Inventory(object): help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') + parser.add_argument('--boto-profile', action='store', + help='Use boto profile for connections to EC2') self.args = parser.parse_args() @@ -338,30 +424,52 @@ class Ec2Inventory(object): self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) + if self.elasticache_enabled: + self.get_elasticache_clusters_by_region(region) + self.get_elasticache_replication_groups_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) + def connect(self, region): + ''' create connection to api server''' + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = self.connect_to_aws(ec2, region) + return conn + + def boto_fix_security_token_in_profile(self, connect_args): + ''' monkey patch for boto issue boto/boto#2100 ''' + profile = 'profile ' + self.boto_profile + if boto.config.has_option(profile, 'aws_security_token'): + connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') + return connect_args + + def connect_to_aws(self, module, region): + connect_args = {} + + # only pass the profile name if it's set (as it is not supported by older boto versions) + if self.boto_profile: + connect_args['profile_name'] = self.boto_profile + self.boto_fix_security_token_in_profile(connect_args) + + conn = module.connect_to_region(region, **connect_args) + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + return conn def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to the list of instances in a particular region ''' try: - if self.eucalyptus: - conn = boto.connect_euca(host=self.eucalyptus_host) - conn.APIVersion = '2010-08-31' - else: - conn = ec2.connect_to_region(region) - - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) - sys.exit(1) - + conn = self.connect(region) reservations = [] if self.ec2_instance_filters: - for filter_key, filter_values in self.ec2_instance_filters.iteritems(): + for filter_key, filter_values in self.ec2_instance_filters.items(): reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) else: reservations = conn.get_all_instances() @@ -370,40 +478,130 @@ class Ec2Inventory(object): for instance in reservation.instances: self.add_instance(instance, region) - except boto.exception.BotoServerError, e: - if not self.eucalyptus: - print "Looks like AWS is down again:" - print e - sys.exit(1) + except boto.exception.BotoServerError as e: + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + else: + backend = 'Eucalyptus' if self.eucalyptus else 'AWS' + error = "Error connecting to %s backend.\n%s" % (backend, e.message) + self.fail_with_error(error, 'getting EC2 instances') def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular region ''' try: - conn = rds.connect_to_region(region) + conn = self.connect_to_aws(rds, region) if conn: instances = conn.get_all_dbinstances() for instance in instances: self.add_rds_instance(instance, region) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() if not e.reason == "Forbidden": - print "Looks like AWS RDS is down: " - print e - sys.exit(1) + error = "Looks like AWS RDS is down:\n%s" % e.message + self.fail_with_error(error, 'getting RDS instances') - def get_instance(self, region, instance_id): - ''' Gets details about a specific instance ''' - if self.eucalyptus: - conn = boto.connect_euca(self.eucalyptus_host) - conn.APIVersion = '2010-08-31' + def get_elasticache_clusters_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache clusters (with + nodes' info) in a particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) + try: + conn = elasticache.connect_to_region(region) + if conn: + # show_cache_node_info = True + # because we also want nodes' information + response = conn.describe_cache_clusters(None, None, None, True) + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS ElastiCache is down:\n%s" % e.message + self.fail_with_error(error, 'getting ElastiCache clusters') + + try: + # Boto also doesn't provide wrapper classes to CacheClusters or + # CacheNodes. Because of that wo can't make use of the get_list + # method in the AWSQueryConnection. Let's do the work manually + clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] + + except KeyError as e: + error = "ElastiCache query to AWS failed (unexpected format)." 
+ self.fail_with_error(error, 'getting ElastiCache clusters') + + for cluster in clusters: + self.add_elasticache_cluster(cluster, region) + + def get_elasticache_replication_groups_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache replication groups + in a particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) + try: + conn = elasticache.connect_to_region(region) + if conn: + response = conn.describe_replication_groups() + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message + self.fail_with_error(error, 'getting ElastiCache clusters') + + try: + # Boto also doesn't provide wrapper classes to ReplicationGroups + # Because of that wo can't make use of the get_list method in the + # AWSQueryConnection. Let's do the work manually + replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] + + except KeyError as e: + error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." + self.fail_with_error(error, 'getting ElastiCache clusters') + + for replication_group in replication_groups: + self.add_elasticache_replication_group(replication_group, region) + + def get_auth_error_message(self): + ''' create an informative error message if there is an issue authenticating''' + errors = ["Authentication error retrieving ec2 inventory."] + if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: + errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') else: - conn = ec2.connect_to_region(region) + errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) - sys.exit(1) + boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] + boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) + if len(boto_config_found) > 0: + errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) + else: + errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) + + return '\n'.join(errors) + + def fail_with_error(self, err_msg, err_operation=None): + '''log an error to std err for ansible-playbook to consume and exit''' + if err_operation: + err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( + err_msg=err_msg, err_operation=err_operation) + sys.stderr.write(err_msg) + sys.exit(1) + + def get_instance(self, region, instance_id): + conn = self.connect(region) reservations = conn.get_all_instances([instance_id]) for reservation in reservations: @@ -414,8 +612,8 @@ class Ec2Inventory(object): ''' Adds an instance to the inventory and index, as long as it is addressable ''' - # Only want running instances unless all_instances is True - if not self.all_instances and instance.state != 'running': + # Only return instances with desired instance states + if instance.state not in self.ec2_instance_states: return # Select the best destination address @@ -502,18 +700,21 @@ class Ec2Inventory(object): if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' - sys.exit(1) + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by tag keys if self.group_by_tag_keys: - for k, v in instance.tags.iteritems(): - key = self.to_safe("tag_" + k + "=" + v) + for k, v in instance.tags.items(): + if v: + key = self.to_safe("tag_" + k + "=" + v) + else: + key = self.to_safe("tag_" + k) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - self.push_group(self.inventory, self.to_safe("tag_" + k), key) + if v: + self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled if self.route53_enabled and self.group_by_route53_names: @@ -597,9 +798,9 @@ class Ec2Inventory(object): self.push_group(self.inventory, 'security_groups', key) except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' 
- sys.exit(1) + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + # Inventory: Group by engine if self.group_by_rds_engine: @@ -618,6 +819,243 @@ class Ec2Inventory(object): self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + def add_elasticache_cluster(self, cluster, region): + ''' Adds an ElastiCache cluster to the inventory and index, as long as + it's nodes are addressable ''' + + # Only want available clusters unless all_elasticache_clusters is True + if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': + return + + # Select the best destination address + if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: + # Memcached cluster + dest = cluster['ConfigurationEndpoint']['Address'] + is_redis = False + else: + # Redis sigle node cluster + # Because all Redis clusters are single nodes, we'll merge the + # info from the cluster with info about the node + dest = cluster['CacheNodes'][0]['Endpoint']['Address'] + is_redis = True + + if not dest: + # Skip clusters we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, cluster['CacheClusterId']] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[cluster['CacheClusterId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) + + # Inventory: Group by region + if self.group_by_region and not is_redis: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone and not is_redis: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type and not is_redis: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group and not is_redis: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. 
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine and not is_redis: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) + + # Inventory: Group by parameter group + if self.group_by_elasticache_parameter_group: + self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) + + # Inventory: Group by replication group + if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: + self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) + + host_info = self.get_host_info_dict_from_describe_dict(cluster) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + # Add the nodes + for node in cluster['CacheNodes']: + self.add_elasticache_node(node, cluster, region) + + def add_elasticache_node(self, node, cluster, region): + ''' Adds an ElastiCache node to the inventory and index, as long as + it is addressable ''' + + # Only want available nodes unless all_elasticache_nodes is True + if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': + return + + # Select the best destination address + dest = node['Endpoint']['Address'] + + if not dest: + # Skip nodes we cannot address (e.g. 
private VPC subnet) + return + + node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) + + # Add to index + self.index[dest] = [region, node_id] + + # Inventory: Group by node ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[node_id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', node_id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. + if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) + + # Inventory: Group by parameter group (done at cluster level) + + # Inventory: Group by replication group (done at cluster level) + + # Inventory: Group by ElastiCache Cluster + if self.group_by_elasticache_cluster: + self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) + + # Global Tag: all ElastiCache nodes + self.push(self.inventory, 'elasticache_nodes', dest) + + host_info = self.get_host_info_dict_from_describe_dict(node) + + if dest in self.inventory["_meta"]["hostvars"]: + self.inventory["_meta"]["hostvars"][dest].update(host_info) + else: + self.inventory["_meta"]["hostvars"][dest] = host_info + + def add_elasticache_replication_group(self, replication_group, region): + ''' Adds an ElastiCache replication group to the inventory and index ''' + + # Only want available clusters unless all_elasticache_replication_groups is True + if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': + return + + # Select the best destination address (PrimaryEndpoint) + dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] + + if not dest: + # Skip clusters we cannot address (e.g. 
private VPC subnet) + return + + # Add to index + self.index[dest] = [region, replication_group['ReplicationGroupId']] + + # Inventory: Group by ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[replication_group['ReplicationGroupId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone (doesn't apply to replication groups) + + # Inventory: Group by node type (doesn't apply to replication groups) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for replication groups + + # Inventory: Group by security group (doesn't apply to replication groups) + # Check this value in cluster level + + # Inventory: Group by engine (replication groups are always Redis) + if self.group_by_elasticache_engine: + self.push(self.inventory, 'elasticache_redis', dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', 'redis') + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) + + host_info = self.get_host_info_dict_from_describe_dict(replication_group) + + self.inventory["_meta"]["hostvars"][dest] = host_info def get_route53_records(self): ''' Get and store the map of resource records to domain names that @@ -666,7 +1104,6 @@ class Ec2Inventory(object): return list(name_list) - def get_host_info_dict_from_instance(self, instance): instance_vars = {} for key in vars(instance): @@ -683,7 +1120,7 @@ class Ec2Inventory(object): instance_vars['ec2_previous_state_code'] = instance.previous_state_code elif type(value) in [int, bool]: instance_vars[key] = value - elif type(value) in [str, unicode]: + elif isinstance(value, six.string_types): instance_vars[key] = value.strip() elif type(value) == type(None): instance_vars[key] = '' @@ -692,7 +1129,7 @@ class Ec2Inventory(object): elif key == 'ec2__placement': instance_vars['ec2_placement'] = value.zone elif key == 'ec2_tags': - for k, v in value.iteritems(): + for k, v in value.items(): key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': @@ -712,6 +1149,91 @@ class Ec2Inventory(object): return instance_vars + def get_host_info_dict_from_describe_dict(self, describe_dict): + ''' Parses the dictionary returned by the API call into a flat list + of parameters. This method should be used only when 'describe' is + used directly because Boto doesn't provide specific classes. ''' + + # I really don't agree with prefixing everything with 'ec2' + # because EC2, RDS and ElastiCache are different services. + # I'm just following the pattern used until now to not break any + # compatibility. 
+ + host_info = {} + for key in describe_dict: + value = describe_dict[key] + key = self.to_safe('ec2_' + self.uncammelize(key)) + + # Handle complex types + + # Target: Memcached Cache Clusters + if key == 'ec2_configuration_endpoint' and value: + host_info['ec2_configuration_endpoint_address'] = value['Address'] + host_info['ec2_configuration_endpoint_port'] = value['Port'] + + # Target: Cache Nodes and Redis Cache Clusters (single node) + if key == 'ec2_endpoint' and value: + host_info['ec2_endpoint_address'] = value['Address'] + host_info['ec2_endpoint_port'] = value['Port'] + + # Target: Redis Replication Groups + if key == 'ec2_node_groups' and value: + host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] + host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + replica_count = 0 + for node in value[0]['NodeGroupMembers']: + if node['CurrentRole'] == 'primary': + host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] + host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] + host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] + elif node['CurrentRole'] == 'replica': + host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] + host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] + host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] + replica_count += 1 + + # Target: Redis Replication Groups + if key == 'ec2_member_clusters' and value: + host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) + + # Target: All Cache Clusters + elif key == 'ec2_cache_parameter_group': + host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) + host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] + host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] + + # Target: Almost everything + elif key == 'ec2_security_groups': + + # Skip if SecurityGroups is None + # (it is possible to have the key defined but no value in it). 
+ if value is not None: + sg_ids = [] + for sg in value: + sg_ids.append(sg['SecurityGroupId']) + host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + + # Target: Everything + # Preserve booleans and integers + elif type(value) in [int, bool]: + host_info[key] = value + + # Target: Everything + # Sanitize string values + elif isinstance(value, six.string_types): + host_info[key] = value.strip() + + # Target: Everything + # Replace None by an empty string + elif type(value) == type(None): + host_info[key] = '' + + else: + # Remove non-processed complex types + pass + + return host_info + def get_host_info(self): ''' Get variables about a specific host ''' @@ -775,13 +1297,16 @@ class Ec2Inventory(object): cache.write(json_data) cache.close() + def uncammelize(self, key): + temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be - used as Ansible groups ''' - - return re.sub("[^A-Za-z0-9\-]", "_", word) - + ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + regex = "[^A-Za-z0-9\_" + if not self.replace_dash_in_groups: + regex += "\-" + return re.sub(regex + "]", "_", word) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example new file mode 100644 index 000000000..637f13be6 --- /dev/null +++ b/inventory/byo/hosts.aep.example @@ -0,0 +1,286 @@ +# This is an example of a bring your own (byo) host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +lb + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# SSH user, this user should allow ssh based auth without requiring a +# password. If using ssh key based auth, then the key should be managed by an +# ssh agent. 
+ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_sudo must be set to true and the +# user must be configured for passwordless sudo +#ansible_sudo=true + +# Debug level for all Atomic Enterprise components (Defaults to 2) +debug_level=2 + +# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise +deployment_type=atomic-enterprise + +# Install the openshift examples +#openshift_install_examples=true + +# Enable cluster metrics +#use_cluster_metrics=true + +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensionScripts in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] + +# Configure extensionStylesheets in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure metricsPublicURL in the master config for cluster metrics +# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html +#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics + +# Configure loggingPublicURL in the master config for aggregate logging +# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html +#openshift_master_logging_public_url=https://kibana.example.com + +# Docker Configuration +# Add additional, insecure, and blocked registries to global docker configuration +# For enterprise deployment types we ensure that registry.access.redhat.com is +# included if you do not include it +#cli_docker_additional_registries=registry.example.com +#cli_docker_insecure_registries=registry.example.com +#cli_docker_blocked_registries=registry.hacker.com +# Items added, as is, to end of /etc/sysconfig/docker OPTIONS +#openshift_docker_options="-l warn --ipv6=false" +# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead +#cli_docker_log_driver=json +#cli_docker_log_options="tag=mailer" + +# Alternate image format string. 
If you're not modifying the format string and +# only need to inject your own registry you may want to consider +# cli_docker_additional_registries instead +#oreg_url=example.com/aep3/aep-${component}:${version} + +# Additional yum repos to install +#openshift_additional_repos=[{'id': 'aep-devel', 'name': 'aep-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] + +# htpasswd auth +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] + +# Allow all auth +#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] + +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] + +# Project Configuration +#osm_project_request_message='' +#osm_project_request_template='' +#osm_mcs_allocator_range='s0:/2' +#osm_mcs_labels_per_project=5 +#osm_uid_allocator_range='1000000000-1999999999/10000' + +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. +#openshift_master_cluster_method=native +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Pacemaker high availability cluster method. +# Pacemaker HA environment must be able to self provision the +# configured VIP. For installation openshift_master_cluster_hostname +# must resolve to the configured VIP. +#openshift_master_cluster_method=pacemaker +#openshift_master_cluster_password=openshift_cluster +#openshift_master_cluster_vip=192.168.133.25 +#openshift_master_cluster_public_vip=192.168.133.25 +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Override the default controller lease ttl +#osm_controller_lease_ttl=30 + +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + +# default subdomain to use for exposed routes +#openshift_master_default_subdomain=apps.test.example.com + +# additional cors origins +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] + +# default project node selector +#osm_default_node_selector='region=primary' + +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + +# default storage plugin dependencies to install, by default the ceph and +# glusterfs plugin dependencies will be installed, if available. 
+#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] + +# default selectors for router and registry services +# openshift_router_selector='region=infra' +# openshift_registry_selector='region=infra' + +# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') +# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' + +# Disable the OpenShift SDN plugin +# openshift_use_openshift_sdn=False + +# Configure SDN cluster network CIDR block. This network block should +# be a private block and should not conflict with existing network +# blocks in your infrastructure that pods may require access to. +# Can not be changed after deployment. +#osm_cluster_network_cidr=10.1.0.0/16 + +# Configure number of bits to allocate to each host’s subnet e.g. 8 +# would mean a /24 network on the host. +#osm_host_subnet_length=8 + +# Configure master API and console ports. +#openshift_master_api_port=8443 +#openshift_master_console_port=8443 + +# set RPM version for debugging purposes +#openshift_pkg_version=-3.1.0.0 + +# Configure custom named certificates +# NOTE: openshift_master_named_certificates is cached on masters and is an +# additive fact, meaning that each run with a different set of certificates +# will add the newly provided certificates to the cached set of certificates. +# If you would like openshift_master_named_certificates to be overwritten with +# the provided value, specify openshift_master_overwrite_named_certificates. +#openshift_master_overwrite_named_certificates: true +# +# Provide local certificate paths which will be deployed to masters +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}] +# +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}] + +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_encryption_secrets must be equal length. +# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. +#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. +#openshift_node_set_node_ip=True + +# Force setting of system hostname when configuring OpenShift +# This works around issues related to installations that do not have valid dns +# entries for the interfaces attached to the host. +#openshift_set_hostname=True + +# Configure dnsIP in the node config +#openshift_dns_ip=172.30.0.1 + +# Persistent Storage Options +# +## Registry Storage Options +## +## Storage Kind +## Specifies which storage kind will be used for the registry. +## "nfs" is the only supported kind at this time. 
+##openshift_hosted_registry_storage_kind=nfs +## +## Storage Host +## This variable can be used to identify a pre-existing storage host +## if a storage host group corresponding to the storage kind (such as +## [nfs]) is not specified, +##openshift_hosted_registry_storage_host=nfs.example.com +## +## NFS Export Options +##openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +## +## NFS Export Directory +## Specify the root exports directory. This directory will be created +## if specifying an [nfs] host group. +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_nfs_directory=/exports +## +## Registry Volume Name +## Specify the storage volume name. This directory will be created +## within openshift_hosted_registry_storage_nfs_directory if +## specifying an [nfs] group. Ex. /exports/registry +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_volume_name=registry +## +## Persistent Volume Access Mode +##openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] + +# Configure node kubelet arguments +#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} + +# Configure logrotate scripts +# See: https://github.com/nickhammond/ansible-logrotate +#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] + +# openshift-ansible will wait indefinitely for your input when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to true will override that check. +#openshift_override_hostname_check=true + +# host group for masters +[masters] +aep3-master[1:3]-ansible.test.example.com + +[etcd] +aep3-etcd[1:3]-ansible.test.example.com + +[lb] +aep3-lb-ansible.test.example.com + +# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes +# However, in order to ensure that your masters are not burdened with running pods you should +# make them unschedulable by adding openshift_schedulable=False any node that's also a master. 
+[nodes] +aep3-master[1:3]-ansible.test.example.com +aep3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/inventory/byo/hosts.aep_quickstart b/inventory/byo/hosts.aep_quickstart new file mode 100644 index 000000000..46ea3a03f --- /dev/null +++ b/inventory/byo/hosts.aep_quickstart @@ -0,0 +1,20 @@ +[OSEv3:children] +masters +nodes +etcd +lb + +[OSEv3:vars] +ansible_ssh_user=root +deployment_type=atomic-enterprise +osm_use_cockpit=true + +[masters] +ose3-master.example.com + +[nodes] +ose3-master.example.com openshift_scheduleable=True + +[etcd] + +[lb] diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example deleted file mode 100644 index df1bae49f..000000000 --- a/inventory/byo/hosts.example +++ /dev/null @@ -1,87 +0,0 @@ -# This is an example of a bring your own (byo) host inventory - -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes -etcd - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] -# SSH user, this user should allow ssh based auth without requiring a -# password. If using ssh key based auth, then the key should be managed by an -# ssh agent. -ansible_ssh_user=root - -# If ansible_ssh_user is not root, ansible_sudo must be set to true and the -# user must be configured for passwordless sudo -#ansible_sudo=true - -# deployment type valid values are origin, online and enterprise -deployment_type=atomic-enterprise - -# Enable cluster metrics -#use_cluster_metrics=true - -# Pre-release registry URL -#oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version} - -# Pre-release Dev puddle repo -#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] - -# Pre-release Errata puddle repo -#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] - -# Origin copr repo -#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] - -# htpasswd auth -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}] - -# Allow all auth -#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] - -# Configure Fluentd -#use_fluentd=true - -# master cluster ha variables using pacemaker or RHEL HA -#openshift_master_cluster_password=openshift_cluster -#openshift_master_cluster_vip=192.168.133.25 -#openshift_master_cluster_public_vip=192.168.133.25 -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# master cluster ha variables when using a different HA solution -# For installation the value of openshift_master_cluster_hostname must resolve -# to the first master defined in the inventory. 
-# The HA solution must be manually configured after installation and must ensure -# that the master is running on a single master host. -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_defer_ha=True - -# default subdomain to use for exposed routes -#osm_default_subdomain=apps.test.example.com - -# additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] - -# default project node selector -#osm_default_node_selector='region=primary' - -# set RPM version for debugging purposes -#openshift_pkg_version=-3.0.0.0 - -# host group for masters -[masters] -ose3-master[1:3]-ansible.test.example.com - -[etcd] -ose3-etcd[1:3]-ansible.test.example.com - -# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes -# However, in order to ensure that your masters are not burdened with running pods you should -# make them unschedulable by adding openshift_scheduleable=False any node that's also a master. -[nodes] -ose3-master[1:3]-ansible.test.example.com -ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/inventory/byo/hosts.openstack b/inventory/byo/hosts.openstack new file mode 100644 index 000000000..05df75c2f --- /dev/null +++ b/inventory/byo/hosts.openstack @@ -0,0 +1,37 @@ +# This is an example of a bring your own (byo) host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +lb + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +ansible_ssh_user=cloud-user +ansible_sudo=true + +# Debug level for all OpenShift components (Defaults to 2) +debug_level=2 + +deployment_type=openshift-enterprise + +openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}] + +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift.common.config_base }}/htpasswd'}] + +#openshift_pkg_version=-3.0.0.0 + +[masters] +jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" + +[etcd] +jdetiber-etcd.usersys.redhat.com + +[lb] +#ose3-lb-ansible.test.example.com + +[nodes] +jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" +jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example new file mode 100644 index 000000000..c30f65f9f --- /dev/null +++ b/inventory/byo/hosts.origin.example @@ -0,0 +1,291 @@ +# This is an example of a bring your own (byo) host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +lb +nfs + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# SSH user, this user should allow ssh based auth without requiring a +# password. If using ssh key based auth, then the key should be managed by an +# ssh agent. 
+ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_sudo must be set to true and the +# user must be configured for passwordless sudo +#ansible_sudo=true + +# Debug level for all OpenShift components (Defaults to 2) +debug_level=2 + +# deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise +deployment_type=origin + +# Install the openshift examples +#openshift_install_examples=true + +# Enable cluster metrics +#use_cluster_metrics=true + +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensionScripts in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] + +# Configure extensionStylesheets in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure metricsPublicURL in the master config for cluster metrics +# See: https://docs.openshift.org/latest/install_config/cluster_metrics.html +#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics + +# Configure loggingPublicURL in the master config for aggregate logging +# See: https://docs.openshift.org/latest/install_config/aggregate_logging.html +#openshift_master_logging_public_url=https://kibana.example.com + +# Docker Configuration +# Add additional, insecure, and blocked registries to global docker configuration +# For enterprise deployment types we ensure that registry.access.redhat.com is +# included if you do not include it +#cli_docker_additional_registries=registry.example.com +#cli_docker_insecure_registries=registry.example.com +#cli_docker_blocked_registries=registry.hacker.com +# Items added, as is, to end of /etc/sysconfig/docker OPTIONS +#openshift_docker_options="-l warn --ipv6=false" +# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead +#cli_docker_log_driver=json +#cli_docker_log_options="tag=mailer" + +# Alternate image format string. 
If you're not modifying the format string and +# only need to inject your own registry you may want to consider +# cli_docker_additional_registries instead +#oreg_url=example.com/openshift3/ose-${component}:${version} + +# Origin copr repo +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] + +# Origin Fedora copr repo +# Use this if you are installing on Fedora +#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}] + +# htpasswd auth +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] + +# Allow all auth +#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] + +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] + +# Project Configuration +#osm_project_request_message='' +#osm_project_request_template='' +#osm_mcs_allocator_range='s0:/2' +#osm_mcs_labels_per_project=5 +#osm_uid_allocator_range='1000000000-1999999999/10000' + +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. +#openshift_master_cluster_method=native +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Pacemaker high availability cluster method. +# Pacemaker HA environment must be able to self provision the +# configured VIP. For installation openshift_master_cluster_hostname +# must resolve to the configured VIP. 
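As noted above, for both the native and pacemaker HA methods openshift_master_cluster_hostname has to resolve correctly (to the load balancer, to a master, or to the configured VIP) before installation. A quick resolution check, sketched with the example values used in this file:

import socket

cluster_hostname = "openshift-ansible.test.example.com"   # openshift_master_cluster_hostname
expected = "192.168.133.25"                                # load balancer address or configured VIP

try:
    resolved = sorted({info[4][0] for info in socket.getaddrinfo(cluster_hostname, None)})
except socket.gaierror:
    resolved = []
print("%s resolves to %s" % (cluster_hostname, resolved))
if expected not in resolved:
    print("WARNING: the cluster hostname does not resolve to the expected address")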
+#openshift_master_cluster_method=pacemaker +#openshift_master_cluster_password=openshift_cluster +#openshift_master_cluster_vip=192.168.133.25 +#openshift_master_cluster_public_vip=192.168.133.25 +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Override the default controller lease ttl +#osm_controller_lease_ttl=30 + +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + +# default subdomain to use for exposed routes +#openshift_master_default_subdomain=apps.test.example.com + +# additional cors origins +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] + +# default project node selector +#osm_default_node_selector='region=primary' + +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + +# default storage plugin dependencies to install, by default the ceph and +# glusterfs plugin dependencies will be installed, if available. +#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] + +# default selectors for router and registry services +# openshift_router_selector='region=infra' +# openshift_registry_selector='region=infra' + +# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') +# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' + +# Disable the OpenShift SDN plugin +# openshift_use_openshift_sdn=False + +# Configure SDN cluster network CIDR block. This network block should +# be a private block and should not conflict with existing network +# blocks in your infrastructure that pods may require access to. +# Can not be changed after deployment. +#osm_cluster_network_cidr=10.1.0.0/16 + +# Configure number of bits to allocate to each host’s subnet e.g. 8 +# would mean a /24 network on the host. +#osm_host_subnet_length=8 + +# Configure master API and console ports. +#openshift_master_api_port=8443 +#openshift_master_console_port=8443 + +# set RPM version for debugging purposes +#openshift_pkg_version=-1.1 + +# Configure custom named certificates +# NOTE: openshift_master_named_certificates is cached on masters and is an +# additive fact, meaning that each run with a different set of certificates +# will add the newly provided certificates to the cached set of certificates. +# If you would like openshift_master_named_certificates to be overwritten with +# the provided value, specify openshift_master_overwrite_named_certificates. +#openshift_master_overwrite_named_certificates: true +# +# Provide local certificate paths which will be deployed to masters +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}] +# +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}] + +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_encryption_secrets must be equal length. +# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. 
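For the session secrets discussed above, the signing secrets are recommended to be 32 or 64 bytes, and the encryption secrets must be exactly 16, 24, or 32 characters (AES-128, AES-192, or AES-256). A small sketch for generating values of the right length (the random_secret helper is illustrative only):

import base64
import os

def random_secret(length):
    """Return a printable secret of exactly `length` characters."""
    return base64.b64encode(os.urandom(length * 2))[:length].decode("ascii")

auth_secret = random_secret(64)        # HMAC signing: 32 or 64 bytes recommended
encryption_secret = random_secret(32)  # AES-256: must be 16, 24, or 32 characters
print("openshift_master_session_auth_secrets=['%s']" % auth_secret)
print("openshift_master_session_encryption_secrets=['%s']" % encryption_secret)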
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. +#openshift_node_set_node_ip=True + +# Force setting of system hostname when configuring OpenShift +# This works around issues related to installations that do not have valid dns +# entries for the interfaces attached to the host. +#openshift_set_hostname=True + +# Configure dnsIP in the node config +#openshift_dns_ip=172.30.0.1 + +# Persistent Storage Options +# +## Registry Storage Options +## +## Storage Kind +## Specifies which storage kind will be used for the registry. +## nfs is the only supported kind at this time. +##openshift_hosted_registry_storage_kind=nfs +## +## Storage Host +## This variable can be used to identify a pre-existing storage host +## if a storage host group corresponding to the storage kind (such as +## [nfs]) is not specified, +##openshift_hosted_registry_storage_host=nfs.example.com +## +## NFS Export Options +##openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +## +## NFS Export Directory +## Specify the root exports directory. This directory will be created +## if specifying an [nfs] host group. +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_nfs_directory=/exports +## +## Registry Volume Name +## Specify the storage volume name. This directory will be created +## within openshift_hosted_registry_storage_nfs_directory if +## specifying an [nfs] group. Ex: /exports/registry +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_volume_name=registry +## +## Persistent Volume Access Mode +##openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] + +# Configure node kubelet arguments +#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} + +# Configure logrotate scripts +# See: https://github.com/nickhammond/ansible-logrotate +#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] + +# openshift-ansible will wait indefinitely for your input when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to true will override that check. 
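A rough approximation of the hostname check described above (openshift-ansible itself derives this from Ansible facts; the sketch below only compares DNS resolution against the default-route address and loopback):

import socket

def local_addresses():
    """Best-effort set of addresses bound to this host: default-route address plus loopback."""
    addrs = {"127.0.0.1"}
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect(("8.8.8.8", 53))  # UDP connect sends no traffic; it only selects a source address
        addrs.add(sock.getsockname()[0])
    finally:
        sock.close()
    return addrs

def hostname_is_local(hostname):
    """True when `hostname` resolves to an address that appears to be bound locally."""
    try:
        resolved = {info[4][0] for info in socket.getaddrinfo(hostname, None)}
    except socket.gaierror:
        return False
    return bool(resolved & local_addresses())

print(hostname_is_local(socket.getfqdn()))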
+#openshift_override_hostname_check=true + +# host group for masters +[masters] +ose3-master[1:3]-ansible.test.example.com + +[etcd] +ose3-etcd[1:3]-ansible.test.example.com + +[lb] +ose3-lb-ansible.test.example.com + +# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes +# However, in order to ensure that your masters are not burdened with running pods you should +# make them unschedulable by adding openshift_schedulable=False any node that's also a master. +[nodes] +ose3-master[1:3]-ansible.test.example.com +ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example new file mode 100644 index 000000000..b51569e68 --- /dev/null +++ b/inventory/byo/hosts.ose.example @@ -0,0 +1,287 @@ +# This is an example of a bring your own (byo) host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +lb + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# SSH user, this user should allow ssh based auth without requiring a +# password. If using ssh key based auth, then the key should be managed by an +# ssh agent. +ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_sudo must be set to true and the +# user must be configured for passwordless sudo +#ansible_sudo=true + +# Debug level for all OpenShift components (Defaults to 2) +debug_level=2 + +# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise +deployment_type=openshift-enterprise + +# Install the openshift examples +#openshift_install_examples=true + +# Enable cluster metrics +#use_cluster_metrics=true + +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensionScripts in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] + +# Configure extensionStylesheets in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure metricsPublicURL in the master config for cluster metrics +# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html +#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics + +# Configure loggingPublicURL in the master config for aggregate logging +# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html 
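Several of the values in these example inventories (openshift_node_labels, openshift_master_identity_providers, osm_controller_args, openshift_node_kubelet_args, ...) are Python-style dict or list literals passed as strings. A sketch of how such a string maps to a data structure; ast.literal_eval is used here purely for illustration, the real parsing happens in Ansible's templating layer:

import ast

node_labels = "{'region': 'primary', 'zone': 'default'}"
identity_providers = ("[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', "
                      "'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]")

labels = ast.literal_eval(node_labels)            # -> {'region': 'primary', 'zone': 'default'}
providers = ast.literal_eval(identity_providers)
print(labels["zone"])                             # default
print(providers[0]["kind"])                       # HTPasswdPasswordIdentityProvider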
+#openshift_master_logging_public_url=https://kibana.example.com + +# Docker Configuration +# Add additional, insecure, and blocked registries to global docker configuration +# For enterprise deployment types we ensure that registry.access.redhat.com is +# included if you do not include it +#cli_docker_additional_registries=registry.example.com +#cli_docker_insecure_registries=registry.example.com +#cli_docker_blocked_registries=registry.hacker.com +# Items added, as is, to end of /etc/sysconfig/docker OPTIONS +#openshift_docker_options="-l warn --ipv6=false" +# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead +#cli_docker_log_driver=json +#cli_docker_log_options="tag=mailer" + + +# Alternate image format string. If you're not modifying the format string and +# only need to inject your own registry you may want to consider +# cli_docker_additional_registries instead +#oreg_url=example.com/openshift3/ose-${component}:${version} + +# Additional yum repos to install +#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] + +# htpasswd auth +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] + +# Allow all auth +#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] + +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] + +# Project Configuration +#osm_project_request_message='' +#osm_project_request_template='' +#osm_mcs_allocator_range='s0:/2' +#osm_mcs_labels_per_project=5 +#osm_uid_allocator_range='1000000000-1999999999/10000' + +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. +#openshift_master_cluster_method=native +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Pacemaker high availability cluster method. +# Pacemaker HA environment must be able to self provision the +# configured VIP. For installation openshift_master_cluster_hostname +# must resolve to the configured VIP. 
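The oreg_url format string mentioned above carries ${component} and ${version} placeholders that the playbooks fill in per image. A rough illustration of the expansion, assuming string.Template-style substitution and example component/version values:

from string import Template

oreg_url = "example.com/openshift3/ose-${component}:${version}"
image = Template(oreg_url).substitute(component="haproxy-router", version="v3.1.0.4")
print(image)  # example.com/openshift3/ose-haproxy-router:v3.1.0.4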
+#openshift_master_cluster_method=pacemaker +#openshift_master_cluster_password=openshift_cluster +#openshift_master_cluster_vip=192.168.133.25 +#openshift_master_cluster_public_vip=192.168.133.25 +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Override the default controller lease ttl +#osm_controller_lease_ttl=30 + +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + +# default subdomain to use for exposed routes +#openshift_master_default_subdomain=apps.test.example.com + +# additional cors origins +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] + +# default project node selector +#osm_default_node_selector='region=primary' + +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + +# default storage plugin dependencies to install, by default the ceph and +# glusterfs plugin dependencies will be installed, if available. +#osn_storage_plugin_deps=['ceph','glusterfs'] + +# default selectors for router and registry services +# openshift_router_selector='region=infra' +# openshift_registry_selector='region=infra' + +# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') +# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' + +# Disable the OpenShift SDN plugin +# openshift_use_openshift_sdn=False + +# Configure SDN cluster network CIDR block. This network block should +# be a private block and should not conflict with existing network +# blocks in your infrastructure that pods may require access to. +# Can not be changed after deployment. +#osm_cluster_network_cidr=10.1.0.0/16 + +# Configure number of bits to allocate to each host’s subnet e.g. 8 +# would mean a /24 network on the host. +#osm_host_subnet_length=8 + +# Configure master API and console ports. +#openshift_master_api_port=8443 +#openshift_master_console_port=8443 + +# set RPM version for debugging purposes +#openshift_pkg_version=-3.1.0.0 + +# Configure custom named certificates +# NOTE: openshift_master_named_certificates is cached on masters and is an +# additive fact, meaning that each run with a different set of certificates +# will add the newly provided certificates to the cached set of certificates. +# If you would like openshift_master_named_certificates to be overwritten with +# the provided value, specify openshift_master_overwrite_named_certificates. +#openshift_master_overwrite_named_certificates: true +# +# Provide local certificate paths which will be deployed to masters +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}] +# +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}] + +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_encryption_secrets must be equal length. +# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. 
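How the osm_cluster_network_cidr and osm_host_subnet_length settings above relate, as a worked example (requires the ipaddress module, standard in Python 3 and available as a backport for Python 2):

import ipaddress

cluster_cidr = ipaddress.ip_network(u"10.1.0.0/16")  # osm_cluster_network_cidr
host_subnet_length = 8                               # osm_host_subnet_length

node_prefix = 32 - host_subnet_length                # each node gets a /24
node_subnets = list(cluster_cidr.subnets(new_prefix=node_prefix))
print("per-node subnet: /%d (%d addresses)" % (node_prefix, 2 ** host_subnet_length))
print("node subnets available in the cluster network: %d" % len(node_subnets))
print("first two: %s, %s" % (node_subnets[0], node_subnets[1]))  # 10.1.0.0/24, 10.1.1.0/24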
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. +#openshift_node_set_node_ip=True + +# Force setting of system hostname when configuring OpenShift +# This works around issues related to installations that do not have valid dns +# entries for the interfaces attached to the host. +#openshift_set_hostname=True + +# Configure dnsIP in the node config +#openshift_dns_ip=172.30.0.1 + +# Persistent Storage Options +# +## Registry Storage Options +## +## Storage Kind +## Specifies which storage kind will be used for the registry. +## "nfs" is the only supported kind at this time. +##openshift_hosted_registry_storage_kind=nfs +## +## Storage Host +## This variable can be used to identify a pre-existing storage host +## if a storage host group corresponding to the storage kind (such as +## [nfs]) is not specified, +##openshift_hosted_registry_storage_host=nfs.example.com +## +## NFS Export Options +##openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +## +## NFS Export Directory +## Specify the root exports directory. This directory will be created +## if specifying an [nfs] host group. +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_nfs_directory=/exports +## +## Registry Volume Name +## Specify the storage volume name. This directory will be created +## within openshift_hosted_registry_storage_nfs_directory if +## specifying an [nfs] group Ex: /exports/registry +## This variable must be supplied if using a pre-existing nfs server. +##openshift_hosted_registry_storage_volume_name=registry +## +## Persistent Volume Access Mode +##openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] + +# Configure node kubelet arguments +#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} + +# Configure logrotate scripts +# See: https://github.com/nickhammond/ansible-logrotate +#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] + +# openshift-ansible will wait indefinitely for your input when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to true will override that check. 
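The logrotate_scripts entry above is consumed by the nickhammond/ansible-logrotate role; the sketch below renders an equivalent logrotate stanza only to show how the keys fit together, and is not the role's actual template:

logrotate_scripts = [{
    "name": "syslog",
    "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n",
    "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"],
    "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"},
}]

for entry in logrotate_scripts:
    lines = [entry["path"].rstrip("\n"), "{"]
    lines += ["    %s" % opt for opt in entry.get("options", [])]
    for hook, body in entry.get("scripts", {}).items():
        lines += ["    %s" % hook, "        %s" % body, "    endscript"]
    lines.append("}")
    print("\n".join(lines))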
+#openshift_override_hostname_check=true + +# host group for masters +[masters] +ose3-master[1:3]-ansible.test.example.com + +[etcd] +ose3-etcd[1:3]-ansible.test.example.com + +[lb] +ose3-lb-ansible.test.example.com + +# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes +# However, in order to ensure that your masters are not burdened with running pods you should +# make them unschedulable by adding openshift_schedulable=False any node that's also a master. +[nodes] +ose3-master[1:3]-ansible.test.example.com +ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py index 3403f735e..99746cdbf 100755 --- a/inventory/gce/hosts/gce.py +++ b/inventory/gce/hosts/gce.py @@ -66,12 +66,22 @@ Examples: $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" Use the GCE inventory script to print out instance specific information - $ plugins/inventory/gce.py --host my_instance + $ contrib/inventory/gce.py --host my_instance Author: Eric Johnson <erjohnso@google.com> Version: 0.0.1 ''' +__requires__ = ['pycrypto>=2.6'] +try: + import pkg_resources +except ImportError: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. We don't + # fail here as there is code that better expresses the errors where the + # library is used. + pass + USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" USER_AGENT_VERSION="v1" @@ -102,9 +112,9 @@ class GceInventory(object): # Just display data for specific host if self.args.host: - print self.json_format_dict(self.node_to_dict( + print(self.json_format_dict(self.node_to_dict( self.get_instance(self.args.host)), - pretty=self.args.pretty) + pretty=self.args.pretty)) sys.exit(0) # Otherwise, assume user wants all instances grouped @@ -211,7 +221,7 @@ class GceInventory(object): 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], - 'gce_public_ip': inst.public_ips[0], + 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], @@ -220,14 +230,14 @@ class GceInventory(object): 'gce_metadata': md, 'gce_network': net, # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': inst.public_ips[0] + 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] } def get_instance(self, instance_name): '''Gets details about a specific instance ''' try: return self.driver.ex_get_node(instance_name) - except Exception, e: + except Exception as e: return None def group_instances(self): @@ -247,7 +257,10 @@ class GceInventory(object): tags = node.extra['tags'] for t in tags: - tag = 'tag_%s' % t + if t.startswith('group-'): + tag = t[6:] + else: + tag = 'tag_%s' % t if groups.has_key(tag): groups[tag].append(name) else: groups[tag] = [name] diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example deleted file mode 100644 index 99f157b11..000000000 --- a/inventory/multi_ec2.yaml.example +++ /dev/null @@ -1,32 +0,0 @@ -# multi ec2 inventory configs -# -cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache - -accounts: - - name: aws1 - provider: aws/hosts/ec2.py - provider_config: - ec2: - regions: all - regions_exclude: us-gov-west-1,cn-north-1 - destination_variable: public_dns_name - route53: False - 
cache_path: ~/.ansible/tmp - cache_max_age: 300 - vpc_destination_variable: ip_address - env_vars: - AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX - AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - all_group: ec2 - hostvars: - cloud: aws - account: aws1 - -- name: aws2 - provider: aws/hosts/ec2.py - env_vars: - AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX - AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - EC2_INI_PATH: /etc/ansible/ec2.ini - -cache_max_age: 60 diff --git a/inventory/multi_ec2.py b/inventory/multi_inventory.py index 2cbf33473..20fc48aa9 100755 --- a/inventory/multi_ec2.py +++ b/inventory/multi_inventory.py @@ -1,6 +1,6 @@ #!/usr/bin/env python2 ''' - Fetch and combine multiple ec2 account settings into a single + Fetch and combine multiple inventory account settings into a single json hash. ''' # vim: expandtab:tabstop=4:shiftwidth=4 @@ -15,13 +15,19 @@ import errno import fcntl import tempfile import copy +from string import Template +import shutil -CONFIG_FILE_NAME = 'multi_ec2.yaml' -DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache') +CONFIG_FILE_NAME = 'multi_inventory.yaml' +DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache') -class MultiEc2(object): +class MultiInventoryException(Exception): + '''Exceptions for MultiInventory class''' + pass + +class MultiInventory(object): ''' - MultiEc2 class: + MultiInventory class: Opens a yaml config file and reads aws credentials. Stores a json hash of resources in result. ''' @@ -35,7 +41,7 @@ class MultiEc2(object): self.cache_path = DEFAULT_CACHE_PATH self.config = None - self.all_ec2_results = {} + self.all_inventory_results = {} self.result = {} self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__))) @@ -50,15 +56,6 @@ class MultiEc2(object): else: self.config_file = None # expect env vars - - def run(self): - '''This method checks to see if the local - cache is valid for the inventory. - - if the cache is valid; return cache - else the credentials are loaded from multi_ec2.yaml or from the env - and we attempt to get the inventory from the provider specified. - ''' # load yaml if self.config_file and os.path.isfile(self.config_file): self.config = self.load_yaml_config() @@ -85,6 +82,15 @@ class MultiEc2(object): if self.config.has_key('cache_location'): self.cache_path = self.config['cache_location'] + def run(self): + '''This method checks to see if the local + cache is valid for the inventory. + + if the cache is valid; return cache + else the credentials are loaded from multi_inventory.yaml or from the env + and we attempt to get the inventory from the provider specified. + ''' + if self.args.get('refresh_cache', None): self.get_inventory() self.write_to_cache() @@ -111,6 +117,10 @@ class MultiEc2(object): with open(conf_file) as conf: config = yaml.safe_load(conf) + # Provide a check for unique account names + if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']): + raise MultiInventoryException('Duplicate account names in config file') + return config def get_provider_tags(self, provider, env=None): @@ -136,23 +146,25 @@ class MultiEc2(object): else: cmds.append('--list') - cmds.append('--refresh-cache') + if 'aws' in provider.lower(): + cmds.append('--refresh-cache') return subprocess.Popen(cmds, stderr=subprocess.PIPE, \ stdout=subprocess.PIPE, env=env) @staticmethod - def generate_config(config_data): - """Generate the ec2.ini file in as a secure temp file. 
- Once generated, pass it to the ec2.py as an environment variable. + def generate_config(provider_files): + """Generate the provider_files in a temporary directory. """ - fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.') - for section, values in config_data.items(): - os.write(fildes, "[%s]\n" % section) - for option, value in values.items(): - os.write(fildes, "%s = %s\n" % (option, value)) - os.close(fildes) - return tmp_file_path + prefix = 'multi_inventory.' + tmp_dir_path = tempfile.mkdtemp(prefix=prefix) + for provider_file in provider_files: + filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+') + content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path) + filedes.write(content) + filedes.close() + + return tmp_dir_path def run_provider(self): '''Setup the provider call with proper variables @@ -160,13 +172,21 @@ class MultiEc2(object): ''' try: all_results = [] - tmp_file_paths = [] + tmp_dir_paths = [] processes = {} for account in self.config['accounts']: - env = account['env_vars'] - if account.has_key('provider_config'): - tmp_file_paths.append(MultiEc2.generate_config(account['provider_config'])) - env['EC2_INI_PATH'] = tmp_file_paths[-1] + tmp_dir = None + if account.has_key('provider_files'): + tmp_dir = MultiInventory.generate_config(account['provider_files']) + tmp_dir_paths.append(tmp_dir) + + # Update env vars after creating provider_config_files + # so that we can grab the tmp_dir if it exists + env = account.get('env_vars', {}) + if env and tmp_dir: + for key, value in env.items(): + env[key] = Template(value).substitute(tmpdir=tmp_dir) + name = account['name'] provider = account['provider'] processes[name] = self.get_provider_tags(provider, env) @@ -182,9 +202,9 @@ class MultiEc2(object): }) finally: - # Clean up the mkstemp file - for tmp_file in tmp_file_paths: - os.unlink(tmp_file) + # Clean up the mkdtemp dirs + for tmp_dir in tmp_dir_paths: + shutil.rmtree(tmp_dir) return all_results @@ -223,7 +243,7 @@ class MultiEc2(object): ] raise RuntimeError('\n'.join(err_msg).format(**result)) else: - self.all_ec2_results[result['name']] = json.loads(result['out']) + self.all_inventory_results[result['name']] = json.loads(result['out']) # Check if user wants extra vars in yaml by # having hostvars and all_group defined @@ -231,33 +251,69 @@ class MultiEc2(object): self.apply_account_config(acc_config) # Build results by merging all dictionaries - values = self.all_ec2_results.values() + values = self.all_inventory_results.values() values.insert(0, self.result) for result in values: - MultiEc2.merge_destructively(self.result, result) + MultiInventory.merge_destructively(self.result, result) + + def add_entry(self, data, keys, item): + ''' Add an item to a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + item = c + ''' + if "." in keys: + key, rest = keys.split(".", 1) + if key not in data: + data[key] = {} + self.add_entry(data[key], rest, item) + else: + data[keys] = item + + def get_entry(self, data, keys): + ''' Get an item from a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + return c + ''' + if keys and "." 
in keys: + key, rest = keys.split(".", 1) + return self.get_entry(data[key], rest) + else: + return data.get(keys, None) def apply_account_config(self, acc_config): ''' Apply account config settings ''' - if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'): - return - - results = self.all_ec2_results[acc_config['name']] - # Update each hostvar with the newly desired key: value - for host_property, value in acc_config['hostvars'].items(): - # Verify the account results look sane - # by checking for these keys ('_meta' and 'hostvars' exist) - if results.has_key('_meta') and results['_meta'].has_key('hostvars'): - for data in results['_meta']['hostvars'].values(): - data[str(host_property)] = str(value) - - # Add this group - if results.has_key(acc_config['all_group']): - results["%s_%s" % (host_property, value)] = \ - copy.copy(results[acc_config['all_group']]) - - # store the results back into all_ec2_results - self.all_ec2_results[acc_config['name']] = results + results = self.all_inventory_results[acc_config['name']] + results['all_hosts'] = results['_meta']['hostvars'].keys() + + # Extra vars go here + for new_var, value in acc_config.get('extra_vars', {}).items(): + for data in results['_meta']['hostvars'].values(): + self.add_entry(data, new_var, value) + + # Clone vars go here + for to_name, from_name in acc_config.get('clone_vars', {}).items(): + for data in results['_meta']['hostvars'].values(): + self.add_entry(data, to_name, self.get_entry(data, from_name)) + + # Extra groups go here + for new_var, value in acc_config.get('extra_groups', {}).items(): + for data in results['_meta']['hostvars'].values(): + results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts']) + + # Clone groups go here + # Build a group based on the desired key name + for to_name, from_name in acc_config.get('clone_groups', {}).items(): + for name, data in results['_meta']['hostvars'].items(): + key = '%s_%s' % (to_name, self.get_entry(data, from_name)) + if not results.has_key(key): + results[key] = [] + results[key].append(name) + + # store the results back into all_inventory_results + self.all_inventory_results[acc_config['name']] = results @staticmethod def merge_destructively(input_a, input_b): @@ -265,7 +321,7 @@ class MultiEc2(object): for key in input_b: if key in input_a: if isinstance(input_a[key], dict) and isinstance(input_b[key], dict): - MultiEc2.merge_destructively(input_a[key], input_b[key]) + MultiInventory.merge_destructively(input_a[key], input_b[key]) elif input_a[key] == input_b[key]: pass # same leaf value # both lists so add each element in b to a if it does ! 
exist @@ -321,7 +377,7 @@ class MultiEc2(object): if exc.errno != errno.EEXIST or not os.path.isdir(path): raise - json_data = MultiEc2.json_format_dict(self.result, True) + json_data = MultiInventory.json_format_dict(self.result, True) with open(self.cache_path, 'w') as cache: try: fcntl.flock(cache, fcntl.LOCK_EX) @@ -357,7 +413,7 @@ class MultiEc2(object): if __name__ == "__main__": - MEC2 = MultiEc2() - MEC2.parse_cli_args() - MEC2.run() - print MEC2.result_str() + MI2 = MultiInventory() + MI2.parse_cli_args() + MI2.run() + print MI2.result_str() diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example new file mode 100644 index 000000000..0f0788d18 --- /dev/null +++ b/inventory/multi_inventory.yaml.example @@ -0,0 +1,51 @@ +# multi ec2 inventory configs +# +cache_location: ~/.ansible/tmp/multi_inventory.cache + +accounts: + - name: aws1 + provider: aws/ec2.py + provider_files: + - name: ec2.ini + content: |- + [ec2] + regions = all + regions_exclude = us-gov-west-1,cn-north-1 + destination_variable = public_dns_name + route53 = False + cache_path = ~/.ansible/tmp + cache_max_age = 300 + vpc_destination_variable = ip_address + env_vars: + AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX + AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider. + extra_vars: + cloud: aws + account: aws1 + +- name: mygce + extra_vars: + cloud: gce + account: gce1 + env_vars: + GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider. + provider: gce/gce.py + provider_files: + - name: priv_key.pem + contents: |- + -----BEGIN PRIVATE KEY----- + yourprivatekeydatahere + -----END PRIVATE KEY----- + - name: gce.ini + contents: |- + [gce] + gce_service_account_email_address = <uuid>@developer.gserviceaccount.com + gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider. + gce_project_id = gce-project + zone = us-central1-a + network = default + gce_machine_type = n1-standard-2 + gce_machine_image = rhel7 + +cache_max_age: 600 diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec deleted file mode 100644 index f163f865a..000000000 --- a/inventory/openshift-ansible-inventory.spec +++ /dev/null @@ -1,108 +0,0 @@ -Summary: OpenShift Ansible Inventories -Name: openshift-ansible-inventory -Version: 0.0.9 -Release: 1%{?dist} -License: ASL 2.0 -URL: https://github.com/openshift/openshift-ansible -Source0: %{name}-%{version}.tar.gz -Requires: python2 -BuildRequires: python2-devel -BuildArch: noarch - -%description -Ansible Inventories used with the openshift-ansible scripts and playbooks. 
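The provider_files mechanism shown in multi_inventory.py and its yaml example above writes each entry's 'contents' into a freshly created temporary directory and substitutes ${tmpdir} in both the file contents and the account's env_vars. A condensed standalone sketch of that flow:

import os
import shutil
import tempfile
from string import Template

provider_files = [
    {"name": "ec2.ini", "contents": "[ec2]\ncache_path = ${tmpdir}\n"},
]
env_vars = {"EC2_INI_PATH": "${tmpdir}/ec2.ini"}

tmp_dir = tempfile.mkdtemp(prefix="multi_inventory.")
try:
    for pfile in provider_files:
        with open(os.path.join(tmp_dir, pfile["name"]), "w") as fd:
            fd.write(Template(pfile["contents"]).substitute(tmpdir=tmp_dir))
    env = {key: Template(value).substitute(tmpdir=tmp_dir) for key, value in env_vars.items()}
    print(env["EC2_INI_PATH"])  # points at the ec2.ini written into the temp dir
finally:
    shutil.rmtree(tmp_dir)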
- -%prep -%setup -q - -%build - -%install -mkdir -p %{buildroot}/etc/ansible -mkdir -p %{buildroot}/usr/share/ansible/inventory -mkdir -p %{buildroot}/usr/share/ansible/inventory/aws -mkdir -p %{buildroot}/usr/share/ansible/inventory/gce - -cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory -cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml -cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws -cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce - -%files -%config(noreplace) /etc/ansible/* -%dir /usr/share/ansible/inventory -/usr/share/ansible/inventory/multi_ec2.py* -/usr/share/ansible/inventory/aws/ec2.py* -/usr/share/ansible/inventory/gce/gce.py* - -%changelog -* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.9-1 -- Merge pull request #408 from sdodson/docker-buildvm (bleanhar@redhat.com) -- Merge pull request #428 from jtslear/issue-383 - (twiest@users.noreply.github.com) -- Merge pull request #407 from aveshagarwal/ae-ansible-merge-auth - (bleanhar@redhat.com) -- Enable htpasswd by default in the example hosts file. (avagarwa@redhat.com) -- Add support for setting default node selector (jdetiber@redhat.com) -- Merge pull request #429 from spinolacastro/custom_cors (bleanhar@redhat.com) -- Updated to read config first and default to users home dir - (kwoodson@redhat.com) -- Fix Custom Cors (spinolacastro@gmail.com) -- Revert "namespace the byo inventory so the group names aren't so generic" - (sdodson@redhat.com) -- Removes hardcoded python2 (jtslear@gmail.com) -- namespace the byo inventory so the group names aren't so generic - (admiller@redhat.com) -- docker-buildvm-rhose is dead (sdodson@redhat.com) -- Add support for setting routingConfig:subdomain (jdetiber@redhat.com) -- Initial HA master (jdetiber@redhat.com) -- Make it clear that the byo inventory file is just an example - (jdetiber@redhat.com) -- Playbook updates for clustered etcd (jdetiber@redhat.com) -- Update for RC2 changes (sdodson@redhat.com) -- Templatize configs and 0.5.2 changes (jdetiber@redhat.com) - -* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.8-1 -- Added more verbosity when error happens. Also fixed a bug. - (kwoodson@redhat.com) -- Implement OpenStack provider (lhuard@amadeus.com) -- * rename openshift_registry_url oreg_url * rename option_images to - _{oreg|ortr}_images (jhonce@redhat.com) -- Fix the remaining pylint warnings (lhuard@amadeus.com) -- Fix some of the pylint warnings (lhuard@amadeus.com) -- [libvirt cluster] Use net-dhcp-leases to find VMs’ IPs (lhuard@amadeus.com) -- fixed the openshift-ansible-bin build (twiest@redhat.com) - -* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1 -- Making multi_ec2 into a library (kwoodson@redhat.com) - -* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1 -- Added support for grouping and a bug fix. (kwoodson@redhat.com) - -* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1 -- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're - not dictating what the ec2.ini file should look like. (twiest@redhat.com) -- Added capability to pass in ec2.ini file. (kwoodson@redhat.com) - -* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1 -- Fixed a bug due to renaming of variables. (kwoodson@redhat.com) - -* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1 -- fixed build problems with openshift-ansible-inventory.spec - (twiest@redhat.com) -- Allow option in multi_ec2 to set cache location. 
-  (kwoodson@redhat.com)
-- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
-- Adding refresh-cache option and cleanup for pylint. Also updated for
-  aws/hosts/ being added. (kwoodson@redhat.com)
-
-* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added the ability to have a config file in /etc/openshift_ansible to
-  multi_ec2.py. (twiest@redhat.com)
-- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
-- gce inventory/playbook updates for node registration changes
-  (jdetiber@redhat.com)
-- Various fixes (jdetiber@redhat.com)
-
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-
diff --git a/inventory/openstack/hosts/nova.py b/inventory/openstack/hosts/nova.py
index d5bd8d1ee..3197a57bc 100755
--- a/inventory/openstack/hosts/nova.py
+++ b/inventory/openstack/hosts/nova.py
@@ -34,7 +34,7 @@ except ImportError:
 # executed with no parameters, return the list of
 # all groups and hosts
-NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
+NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"),
                      os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
                      "/etc/ansible/nova.ini"]
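Looking back at the multi_inventory.py changes in this diff, the new add_entry/get_entry helpers let account-level extra_vars and clone_vars address nested hostvars with dotted keys. A standalone sketch of the same behaviour (the 'oo.account' key is just an example):

def add_entry(data, keys, item):
    """Set a nested dict value addressed by a dotted key such as 'a.b.c'."""
    if "." in keys:
        key, rest = keys.split(".", 1)
        data.setdefault(key, {})
        add_entry(data[key], rest, item)
    else:
        data[keys] = item

def get_entry(data, keys):
    """Read a nested dict value addressed by a dotted key."""
    if keys and "." in keys:
        key, rest = keys.split(".", 1)
        return get_entry(data.get(key, {}), rest)
    return data.get(keys, None)

hostvars = {}
add_entry(hostvars, "oo.account", "aws1")
print(get_entry(hostvars, "oo.account"))  # aws1
print(hostvars)                           # {'oo': {'account': 'aws1'}}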