Diffstat (limited to 'inventory')
-rw-r--r--   inventory/README.md                            9
-rw-r--r--   inventory/aws/hosts/ec2.ini                   56
-rwxr-xr-x   inventory/aws/hosts/ec2.py                   292
-rw-r--r--   inventory/byo/hosts.origin.example           153
-rw-r--r--   inventory/byo/hosts.ose.example              147
-rwxr-xr-x   inventory/gce/hosts/gce.py                   253
-rw-r--r--   inventory/hosts                                2
-rwxr-xr-x   inventory/libvirt/hosts/libvirt_generic.py    11
-rwxr-xr-x   inventory/openstack/hosts/openstack.py         1
9 files changed, 771 insertions, 153 deletions
diff --git a/inventory/README.md b/inventory/README.md
new file mode 100644
index 000000000..b61bfff18
--- /dev/null
+++ b/inventory/README.md
@@ -0,0 +1,9 @@
+# OpenShift Ansible inventory config files
+
+You can install OpenShift on:
+
+* [Amazon Web Services](aws/hosts/)
+* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your bare metal servers
+* [GCE](gce/) (Google Compute Engine)
+* [libvirt](libvirt/hosts/)
+* [OpenStack](openstack/hosts/)
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
index aa0f9090f..64c097d47 100644
--- a/inventory/aws/hosts/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
@@ -29,17 +29,32 @@ regions_exclude = us-gov-west-1,cn-north-1
# in the event of a collision.
destination_variable = public_dns_name
+# This allows you to override the inventory_name with an ec2 variable, instead
+# of using the destination_variable above. Addressing (aka ansible_ssh_host)
+# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
+hostname_variable = tag_Name
+
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
-# WARNING: - instances that are in the private vpc, _without_ public ip address
+# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory until You set:
-# vpc_destination_variable = 'private_ip_address'
+# vpc_destination_variable = private_ip_address
vpc_destination_variable = ip_address
+# The following two settings allow flexible ansible host naming based on a
+# python format string and a comma-separated list of ec2 tags. Note that:
+#
+# 1) If the tags referenced are not present for some instances, empty strings
+# will be substituted in the format string.
+# 2) This overrides both destination_variable and vpc_destination_variable.
+#
+#destination_format = {0}.{1}.example.com
+#destination_format_tags = Name,environment
+
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
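
For illustration only (not part of the patch), a minimal Python sketch of how these naming options interact: destination_format/destination_format_tags replace the destination address, hostname_variable renames the host in the inventory, and the destination address is still what Ansible connects to (ansible_ssh_host). Function and variable names are invented, and the script's to_safe()/lower() sanitisation is noted but omitted.

def name_and_address(instance_tags, boto_address,
                     destination_format=None, destination_format_tags=None,
                     hostname_variable=None):
    # The address (later exposed as ansible_ssh_host) comes from destination_format
    # when both options are set (missing tags become empty strings); otherwise it is
    # the destination_variable / vpc_destination_variable lookup done by the script.
    if destination_format and destination_format_tags:
        dest = destination_format.format(
            *[instance_tags.get(tag, '') for tag in destination_format_tags])
    else:
        dest = boto_address
    # The inventory name prefers hostname_variable (only the 'tag_*' case is shown
    # here) and falls back to the address.
    hostname = None
    if hostname_variable and hostname_variable.startswith('tag_'):
        hostname = instance_tags.get(hostname_variable[4:])
    return (hostname or dest), dest

print(name_and_address({'Name': 'web01'}, 'ec2-1-2-3-4.compute-1.amazonaws.com',
                       hostname_variable='tag_Name'))
# -> ('web01', 'ec2-1-2-3-4.compute-1.amazonaws.com')
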
@@ -60,13 +75,16 @@ all_instances = False
# By default, only EC2 instances in the 'running' state are returned. Specify
# EC2 instance states to return as a comma-separated list. This
-# option is overriden when 'all_instances' is True.
+# option is overridden when 'all_instances' is True.
# instance_states = pending, running, shutting-down, terminated, stopping, stopped
# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True return all RDS instances regardless of state.
all_rds_instances = False
+# Include RDS cluster information (Aurora etc.)
+include_rds_clusters = False
+
# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes'
# to True return all ElastiCache clusters and nodes, regardless of state.
@@ -91,19 +109,16 @@ cache_path = ~/.ansible/tmp
# To disable the cache, set this value to 0
cache_max_age = 300
-# These two settings allow flexible ansible host naming based on a format
-# string and a comma-separated list of ec2 tags. The tags used must be
-# present for all instances, or the code will fail. This overrides both
-# destination_variable and vpc_destination_variable.
-# destination_format = {0}.{1}.rhcloud.com
-# destination_format_tags = Name,environment
-
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace - tags when creating groups to avoid issues with ansible
replace_dash_in_groups = False
+# If set to true, any tag of the form "a,b,c" is expanded into a list
+# and the results are used to create additional tag_* inventory groups.
+expand_csv_tags = False
+
# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = True
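
For illustration only (not part of the patch), a small sketch of the group expansion that expand_csv_tags enables; to_safe() here is a simplified stand-in for the script's helper of the same name.

import re

def to_safe(word):
    # Simplified stand-in for Ec2Inventory.to_safe(): make group names Ansible-safe.
    return re.sub(r'[^A-Za-z0-9_]', '_', word)

def tag_groups(key, value, expand_csv_tags=False):
    # With expand_csv_tags a value like "a,b,c" yields one tag_* group per item.
    if expand_csv_tags and value and ',' in value:
        values = [v.strip() for v in value.split(',')]
    else:
        values = [value]
    return [to_safe('tag_%s=%s' % (key, v)) if v else to_safe('tag_' + key)
            for v in values]

print(tag_groups('roles', 'master,etcd,node', expand_csv_tags=True))
# -> ['tag_roles_master', 'tag_roles_etcd', 'tag_roles_node']
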
@@ -147,9 +162,28 @@ group_by_elasticache_replication_group = True
# You can use wildcards in filter values also. Below will list instances which
# tag Name value matches webservers1*
-# (ex. webservers15, webservers1a, webservers123 etc)
+# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*
# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
+
+
+[credentials]
+
+# The AWS credentials can optionally be specified here. Credentials specified
+# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
+# AWS_PROFILE is set, or if the boto_profile property above is set.
+#
+# Supplying AWS credentials here is not recommended, as it introduces
+# non-trivial security concerns. When going down this route, please make sure
+# to set access permissions for this file correctly, e.g. handle it the same
+# way as you would a private SSH key.
+#
+# Unlike the boto and AWS configure files, this section does not support
+# profiles.
+#
+# aws_access_key_id = AXXXXXXXXXXXXXX
+# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
+# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
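
For illustration only (not part of the patch), a sketch of the precedence described above, mirroring what ec2.py does further down in this diff: the [credentials] section is only read when no boto profile is given and neither AWS_ACCESS_KEY_ID nor AWS_PROFILE is set. The file path and the requirement that both key options be present are simplifications for the sketch.

import os

from six.moves import configparser

def load_credentials(ini_path='ec2.ini', boto_profile=None):
    # Environment variables and boto profiles win, exactly as documented above.
    if boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or os.environ.get('AWS_PROFILE'):
        return {}
    config = configparser.SafeConfigParser()
    config.read(ini_path)
    creds = {}
    if (config.has_option('credentials', 'aws_access_key_id') and
            config.has_option('credentials', 'aws_secret_access_key')):
        creds = {
            'aws_access_key_id': config.get('credentials', 'aws_access_key_id'),
            'aws_secret_access_key': config.get('credentials', 'aws_secret_access_key'),
        }
        if config.has_option('credentials', 'aws_security_token'):
            creds['security_token'] = config.get('credentials', 'aws_security_token')
    return creds  # passed as **kwargs to the boto connect_* calls
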
diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py
index 8b878cafd..b71458a29 100755
--- a/inventory/aws/hosts/ec2.py
+++ b/inventory/aws/hosts/ec2.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python2
+# pylint: skip-file
'''
EC2 external inventory script
@@ -37,6 +38,7 @@ When run against a specific host, this script returns the following variables:
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
+ - ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
@@ -131,6 +133,15 @@ from boto import elasticache
from boto import route53
import six
+from ansible.module_utils import ec2 as ec2_utils
+
+HAS_BOTO3 = False
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ pass
+
from six.moves import configparser
from collections import defaultdict
@@ -141,6 +152,7 @@ except ImportError:
class Ec2Inventory(object):
+
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
@@ -157,6 +169,9 @@ class Ec2Inventory(object):
# Boto profile to use (if any)
self.boto_profile = None
+ # AWS credentials.
+ self.credentials = {}
+
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
@@ -224,7 +239,7 @@ class Ec2Inventory(object):
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
- self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
+                self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
@@ -236,6 +251,11 @@ class Ec2Inventory(object):
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
+ if config.has_option('ec2', 'hostname_variable'):
+ self.hostname_variable = config.get('ec2', 'hostname_variable')
+ else:
+ self.hostname_variable = None
+
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
@@ -256,6 +276,12 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
+ # Include RDS cluster instances?
+ if config.has_option('ec2', 'include_rds_clusters'):
+ self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
+ else:
+ self.include_rds_clusters = False
+
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
@@ -318,6 +344,29 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
+ # AWS credentials (prefer environment variables)
+ if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
+ os.environ.get('AWS_PROFILE')):
+ if config.has_option('credentials', 'aws_access_key_id'):
+ aws_access_key_id = config.get('credentials', 'aws_access_key_id')
+ else:
+ aws_access_key_id = None
+ if config.has_option('credentials', 'aws_secret_access_key'):
+ aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
+ else:
+ aws_secret_access_key = None
+ if config.has_option('credentials', 'aws_security_token'):
+ aws_security_token = config.get('credentials', 'aws_security_token')
+ else:
+ aws_security_token = None
+ if aws_access_key_id:
+ self.credentials = {
+ 'aws_access_key_id': aws_access_key_id,
+ 'aws_secret_access_key': aws_secret_access_key
+ }
+ if aws_security_token:
+ self.credentials['security_token'] = aws_security_token
+
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
@@ -325,10 +374,22 @@ class Ec2Inventory(object):
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
- self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
- self.cache_path_index = cache_dir + "/ansible-ec2.index"
+ cache_name = 'ansible-ec2'
+ aws_profile = lambda: (self.boto_profile or
+ os.environ.get('AWS_PROFILE') or
+ os.environ.get('AWS_ACCESS_KEY_ID') or
+ self.credentials.get('aws_access_key_id', None))
+ if aws_profile():
+ cache_name = '%s-%s' % (cache_name, aws_profile())
+ self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
+ self.cache_path_index = cache_dir + "/%s.index" % cache_name
self.cache_max_age = config.getint('ec2', 'cache_max_age')
+ if config.has_option('ec2', 'expand_csv_tags'):
+ self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
+ else:
+ self.expand_csv_tags = False
+
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
@@ -390,7 +451,10 @@ class Ec2Inventory(object):
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
- for instance_filter in config.get('ec2', 'instance_filters', '').split(','):
+
+ filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
+
+ for instance_filter in filters:
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
@@ -409,7 +473,7 @@ class Ec2Inventory(object):
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
- parser.add_argument('--boto-profile', action='store',
+ parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
@@ -427,6 +491,8 @@ class Ec2Inventory(object):
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
+ if self.include_rds_clusters:
+ self.include_rds_clusters_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
@@ -434,7 +500,7 @@ class Ec2Inventory(object):
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host)
+ conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
@@ -448,7 +514,7 @@ class Ec2Inventory(object):
return connect_args
def connect_to_aws(self, module, region):
- connect_args = {}
+ connect_args = self.credentials
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
@@ -474,15 +540,32 @@ class Ec2Inventory(object):
else:
reservations = conn.get_all_instances()
+ # Pull the tags back in a second step
+ # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
+ # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
+ instance_ids = []
+ for reservation in reservations:
+ instance_ids.extend([instance.id for instance in reservation.instances])
+
+ max_filter_value = 199
+ tags = []
+ for i in range(0, len(instance_ids), max_filter_value):
+ tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
+
+ tags_by_instance_id = defaultdict(dict)
+ for tag in tags:
+ tags_by_instance_id[tag.res_id][tag.name] = tag.value
+
for reservation in reservations:
for instance in reservation.instances:
+ instance.tags = tags_by_instance_id[instance.id]
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
- backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
+ backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
@@ -493,9 +576,14 @@ class Ec2Inventory(object):
try:
conn = self.connect_to_aws(rds, region)
if conn:
- instances = conn.get_all_dbinstances()
- for instance in instances:
- self.add_rds_instance(instance, region)
+ marker = None
+ while True:
+ instances = conn.get_all_dbinstances(marker=marker)
+ marker = instances.marker
+ for instance in instances:
+ self.add_rds_instance(instance, region)
+ if not marker:
+ break
except boto.exception.BotoServerError as e:
error = e.reason
@@ -505,6 +593,65 @@ class Ec2Inventory(object):
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
+ def include_rds_clusters_by_region(self, region):
+ if not HAS_BOTO3:
+ self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
+ "getting RDS clusters")
+
+ client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
+
+ marker, clusters = '', []
+ while marker is not None:
+ resp = client.describe_db_clusters(Marker=marker)
+ clusters.extend(resp["DBClusters"])
+ marker = resp.get('Marker', None)
+
+ account_id = boto.connect_iam().get_user().arn.split(':')[4]
+ c_dict = {}
+ for c in clusters:
+ # remove these datetime objects as there is no serialisation to json
+ # currently in place and we don't need the data yet
+ if 'EarliestRestorableTime' in c:
+ del c['EarliestRestorableTime']
+ if 'LatestRestorableTime' in c:
+ del c['LatestRestorableTime']
+
+ if self.ec2_instance_filters == {}:
+ matches_filter = True
+ else:
+ matches_filter = False
+
+ try:
+ # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
+ tags = client.list_tags_for_resource(
+ ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
+ c['Tags'] = tags['TagList']
+
+ if self.ec2_instance_filters:
+ for filter_key, filter_values in self.ec2_instance_filters.items():
+ # get AWS tag key e.g. tag:env will be 'env'
+ tag_name = filter_key.split(":", 1)[1]
+ # Filter values is a list (if you put multiple values for the same tag name)
+ matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
+
+ if matches_filter:
+ # it matches a filter, so stop looking for further matches
+ break
+
+ except Exception as e:
+ if e.message.find('DBInstanceNotFound') >= 0:
+                # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
+ # Ignore errors when trying to find tags for these
+ pass
+
+ # ignore empty clusters caused by AWS bug
+ if len(c['DBClusterMembers']) == 0:
+ continue
+ elif matches_filter:
+ c_dict[c['DBClusterIdentifier']] = c
+
+ self.inventory['db_clusters'] = c_dict
+
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
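
For illustration only (not part of the patch), the cluster data gathered above is stored under a top-level 'db_clusters' key in the inventory output; a minimal sketch of reading it back from --list. The script path is an assumption, and the 'Endpoint' field comes from the standard boto3 describe_db_clusters response rather than from this diff.

import json
import subprocess

# Script path is an assumption; adjust to wherever ec2.py lives.
inventory = json.loads(subprocess.check_output(['./ec2.py', '--list']))

for name, cluster in inventory.get('db_clusters', {}).items():
    members = [m['DBInstanceIdentifier'] for m in cluster['DBClusterMembers']]
    print(name, cluster.get('Endpoint'), members)
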
@@ -513,7 +660,7 @@ class Ec2Inventory(object):
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
- conn = elasticache.connect_to_region(region)
+ conn = self.connect_to_aws(elasticache, region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
@@ -530,7 +677,7 @@ class Ec2Inventory(object):
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
- # CacheNodes. Because of that wo can't make use of the get_list
+ # CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
@@ -549,7 +696,7 @@ class Ec2Inventory(object):
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
- conn = elasticache.connect_to_region(region)
+ conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
@@ -564,7 +711,7 @@ class Ec2Inventory(object):
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
- # Because of that wo can't make use of the get_list method in the
+ # Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
@@ -618,7 +765,7 @@ class Ec2Inventory(object):
# Select the best destination address
if self.destination_format and self.destination_format_tags:
- dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ])
+ dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
@@ -632,32 +779,46 @@ class Ec2Inventory(object):
# Skip instances we cannot address (e.g. private VPC subnet)
return
+ # Set the inventory name
+ hostname = None
+ if self.hostname_variable:
+ if self.hostname_variable.startswith('tag_'):
+ hostname = instance.tags.get(self.hostname_variable[4:], None)
+ else:
+ hostname = getattr(instance, self.hostname_variable)
+
+ # If we can't get a nice hostname, use the destination address
+ if not hostname:
+ hostname = dest
+ else:
+ hostname = self.to_safe(hostname).lower()
+
# if we only want to include hosts that match a pattern, skip those that don't
- if self.pattern_include and not self.pattern_include.match(dest):
+ if self.pattern_include and not self.pattern_include.match(hostname):
return
# if we need to exclude hosts that match a pattern, skip those
- if self.pattern_exclude and self.pattern_exclude.match(dest):
+ if self.pattern_exclude and self.pattern_exclude.match(hostname):
return
# Add to index
- self.index[dest] = [region, instance.id]
+ self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
- self.inventory[instance.id] = [dest]
+ self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
- self.push(self.inventory, region, dest)
+ self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
- self.push(self.inventory, instance.placement, dest)
+ self.push(self.inventory, instance.placement, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
@@ -666,28 +827,28 @@ class Ec2Inventory(object):
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
- self.push(self.inventory, ami_id, dest)
+ self.push(self.inventory, ami_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
- self.push(self.inventory, type_name, dest)
+ self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
- self.push(self.inventory, key_name, dest)
+ self.push(self.inventory, key_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
- self.push(self.inventory, vpc_id_name, dest)
+ self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
@@ -696,44 +857,51 @@ class Ec2Inventory(object):
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
- self.push(self.inventory, key, dest)
+ self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
- self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+ self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
- if v:
- key = self.to_safe("tag_" + k + "=" + v)
+ if self.expand_csv_tags and v and ',' in v:
+ values = map(lambda x: x.strip(), v.split(','))
else:
- key = self.to_safe("tag_" + k)
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+ values = [v]
+
+ for v in values:
if v:
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
+ key = self.to_safe("tag_" + k + "=" + v)
+ else:
+ key = self.to_safe("tag_" + k)
+ self.push(self.inventory, key, hostname)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+ if v:
+ self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
- self.push(self.inventory, name, dest)
+ self.push(self.inventory, name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
- self.push(self.inventory, 'tag_none', dest)
+ self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
- self.push(self.inventory, 'ec2', dest)
+ self.push(self.inventory, 'ec2', hostname)
- self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_rds_instance(self, instance, region):
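
For illustration only (not part of the patch), a sketch of the effect of this change on the generated inventory: the sanitised hostname becomes the inventory name used in groups and _meta, while the original destination address is kept as ansible_ssh_host. All values below are invented.

# Invented example with hostname_variable = tag_Name:
inventory_fragment = {
    "_meta": {
        "hostvars": {
            "web01": {                                    # inventory name from the Name tag
                "ansible_ssh_host": "ec2-1-2-3-4.compute-1.amazonaws.com",  # the old 'dest'
                "ec2_tag_Name": "web01",
            },
        },
    },
    "us-east-1": ["web01"],   # groups are now keyed by the hostname as well
}
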
@@ -751,24 +919,38 @@ class Ec2Inventory(object):
# Skip instances we cannot address (e.g. private VPC subnet)
return
+ # Set the inventory name
+ hostname = None
+ if self.hostname_variable:
+ if self.hostname_variable.startswith('tag_'):
+ hostname = instance.tags.get(self.hostname_variable[4:], None)
+ else:
+ hostname = getattr(instance, self.hostname_variable)
+
+ # If we can't get a nice hostname, use the destination address
+ if not hostname:
+ hostname = dest
+
+ hostname = self.to_safe(hostname).lower()
+
# Add to index
- self.index[dest] = [region, instance.id]
+ self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
- self.inventory[instance.id] = [dest]
+ self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
- self.push(self.inventory, region, dest)
+ self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
- self.push(self.inventory, instance.availability_zone, dest)
+ self.push(self.inventory, instance.availability_zone, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
@@ -777,14 +959,14 @@ class Ec2Inventory(object):
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
- self.push(self.inventory, type_name, dest)
+ self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
- self.push(self.inventory, vpc_id_name, dest)
+ self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
@@ -793,31 +975,32 @@ class Ec2Inventory(object):
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
- self.push(self.inventory, key, dest)
+ self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
- self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+ self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
- self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
+ self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
- self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
+ self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
- self.push(self.inventory, 'rds', dest)
+ self.push(self.inventory, 'rds', hostname)
- self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
@@ -1130,6 +1313,8 @@ class Ec2Inventory(object):
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
+ if self.expand_csv_tags and ',' in v:
+ v = list(map(lambda x: x.strip(), v.split(',')))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
@@ -1140,6 +1325,10 @@ class Ec2Inventory(object):
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
+ elif key == 'ec2_block_device_mapping':
+ instance_vars["ec2_block_devices"] = {}
+ for k, v in value.items():
+ instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
@@ -1320,4 +1509,3 @@ class Ec2Inventory(object):
# Run the script
Ec2Inventory()
-
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 8b3a6e403..c800c690c 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -30,7 +30,7 @@ deployment_type=origin
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v1.2
+openshift_release=v1.4
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
@@ -65,10 +65,6 @@ openshift_release=v1.2
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html
-# Configure loggingPublicURL in the master config for aggregate logging
-# See: https://docs.openshift.org/latest/install_config/aggregate_logging.html
-#openshift_master_logging_public_url=https://kibana.example.com
-
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
@@ -88,7 +84,7 @@ openshift_release=v1.2
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.10.3"
+# docker_version="1.12.1"
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
@@ -99,7 +95,6 @@ openshift_release=v1.2
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
@@ -114,17 +109,46 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
-#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
-# Configuring the ldap ca certificate
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+#
+# Configure LDAP CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the LDAPPasswordIdentityProvider.
+#
#openshift_master_ldap_ca=<ca text>
# or
#openshift_master_ldap_ca_file=<path to local ca file to use>
-# Available variables for configuring certificates for other identity providers:
-#openshift_master_openid_ca
-#openshift_master_openid_ca_file
-#openshift_master_request_header_ca
-#openshift_master_request_header_ca_file
+# OpenID auth
+#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
+#
+# Configure OpenID CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the OpenIDIdentityProvider.
+#
+#openshift_master_openid_ca=<ca text>
+# or
+#openshift_master_openid_ca_file=<path to local ca file to use>
+
+# Request header auth
+#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
+#
+# Configure request header CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "clientCA"
+# key set within the RequestHeaderIdentityProvider.
+#
+#openshift_master_request_header_ca=<ca text>
+# or
+#openshift_master_request_header_ca_file=<path to local ca file to use>
# Cloud Provider Configuration
#
@@ -145,6 +169,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
#openshift_cloudprovider_openstack_username=username
#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_domain_id=domain_id
+#openshift_cloudprovider_openstack_domain_name=domain_name
#openshift_cloudprovider_openstack_tenant_id=tenant_id
#openshift_cloudprovider_openstack_tenant_name=tenant_name
#openshift_cloudprovider_openstack_region=region
@@ -325,6 +351,22 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
+#
+# Any S3 service (Minio, ExoScale, ...): Basically the same as above
+# but with regionendpoint configured
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
+#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
# Metrics deployment
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
@@ -371,6 +413,55 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+# Logging deployment
+#
+# Currently logging deployment is disabled by default; enable it by setting this
+#openshift_hosted_logging_deploy=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_host=nfs.example.com
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_logging_storage_kind=dynamic
+#
+# Option D - none -- Logging will use emptydir volumes which are destroyed when
+# pods are deleted
+#
+# Other Logging Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_hosted_logging/README.md
+#
+# Configure loggingPublicURL in the master config for aggregate logging, defaults
+# to https://kibana.{{ openshift_master_default_subdomain }}
+#openshift_master_logging_public_url=https://kibana.example.com
+# Configure the number of Elasticsearch nodes; unless you're using dynamic
+# provisioning, this value must be 1
+#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_hosted_logging_hostname=logging.apps.example.com
+# Configure the prefix and version for the deployer image
+#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_logging_deployer_version=3.3.0
+
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -381,7 +472,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
-#osm_cluster_network_cidr=10.1.0.0/16
+#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -394,9 +485,16 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
-# Configure number of bits to allocate to each host’s subnet e.g. 8
-# would mean a /24 network on the host.
-#osm_host_subnet_length=8
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
+# Configure number of bits to allocate to each host’s subnet e.g. 9
+# would mean a /23 network on the host.
+#osm_host_subnet_length=9
# Configure master API and console ports.
#openshift_master_api_port=8443
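
For illustration only (not part of the patch), the relationship between osm_host_subnet_length and the per-host network is simply 32 minus the number of host bits:

def host_prefix_length(osm_host_subnet_length):
    # 9 host bits leave a /23 per node; the old default of 8 gave a /24.
    return 32 - osm_host_subnet_length

assert host_prefix_length(9) == 23
assert host_prefix_length(8) == 24
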
@@ -470,7 +568,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1
-# Configure node kubelet arguments
+# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
@@ -490,7 +588,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# be used with 1.0 and 3.0.
#openshift_use_dnsmasq=False
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
-# This is useful for POC environments where DNS may not actually be available yet.
+# This is useful for POC environments where DNS may not actually be available yet, or for
+# setting options like 'strict-order' to alter the dnsmasq configuration.
#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
# Global Proxy Configuration
@@ -517,11 +616,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
-
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
+# Admission plugin config
+#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
+
# Configure usage of openshift_clock role.
#openshift_clock_enabled=true
@@ -537,6 +638,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Enable API service auditing, available as of 1.3
#openshift_master_audit_config={"basicAuditEnabled": true}
+# Enable origin repos that point at the CentOS PaaS SIG, defaults to true, only
+# used by deployment_type=origin
+#openshift_enable_origin_repo=false
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
@@ -544,8 +649,10 @@ ose3-master[1:3]-ansible.test.example.com
[etcd]
ose3-etcd[1:3]-ansible.test.example.com
+# NOTE: Containerized load balancer hosts are not yet supported; if using a global
+# containerized=true host variable, this host must be set to containerized=false.
[lb]
-ose3-lb-ansible.test.example.com
+ose3-lb-ansible.test.example.com containerized=false
# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
# However, in order to ensure that your masters are not burdened with running pods you should
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index af653f850..0e503f28d 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -30,7 +30,7 @@ deployment_type=openshift-enterprise
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.2
+openshift_release=v3.4
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
@@ -65,10 +65,6 @@ openshift_release=v3.2
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html
-# Configure loggingPublicURL in the master config for aggregate logging
-# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html
-#openshift_master_logging_public_url=https://kibana.example.com
-#
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
@@ -88,7 +84,7 @@ openshift_release=v3.2
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.10.3"
+# docker_version="1.12.1"
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
@@ -113,17 +109,46 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
-#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
-# Configuring the ldap ca certificate
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+#
+# Configure LDAP CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the LDAPPasswordIdentityProvider.
+#
#openshift_master_ldap_ca=<ca text>
# or
#openshift_master_ldap_ca_file=<path to local ca file to use>
-# Available variables for configuring certificates for other identity providers:
-#openshift_master_openid_ca
-#openshift_master_openid_ca_file
-#openshift_master_request_header_ca
-#openshift_master_request_header_ca_file
+# OpenID auth
+#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
+#
+# Configure OpenID CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the OpenIDIdentityProvider.
+#
+#openshift_master_openid_ca=<ca text>
+# or
+#openshift_master_openid_ca_file=<path to local ca file to use>
+
+# Request header auth
+#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
+#
+# Configure request header CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "clientCA"
+# key set within the RequestHeaderIdentityProvider.
+#
+#openshift_master_request_header_ca=<ca text>
+# or
+#openshift_master_request_header_ca_file=<path to local ca file to use>
# Cloud Provider Configuration
#
@@ -144,6 +169,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
#openshift_cloudprovider_openstack_username=username
#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_domain_id=domain_id
+#openshift_cloudprovider_openstack_domain_name=domain_name
#openshift_cloudprovider_openstack_tenant_id=tenant_id
#openshift_cloudprovider_openstack_tenant_name=tenant_name
#openshift_cloudprovider_openstack_region=region
@@ -324,6 +351,22 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
+#
+# Any S3 service (Minio, ExoScale, ...): Basically the same as above
+# but with regionendpoint configured
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
+#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
# Metrics deployment
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
@@ -370,6 +413,54 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+# Logging deployment
+#
+# Currently logging deployment is disabled by default; enable it by setting this
+#openshift_hosted_logging_deploy=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_host=nfs.example.com
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_logging_storage_kind=dynamic
+#
+# Option D - none -- Logging will use emptydir volumes which are destroyed when
+# pods are deleted
+#
+# Other Logging Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_hosted_logging/README.md
+#
+# Configure loggingPublicURL in the master config for aggregate logging, defaults
+# to https://kibana.{{ openshift_master_default_subdomain }}
+#openshift_master_logging_public_url=https://kibana.example.com
+# Configure the number of Elasticsearch nodes; unless you're using dynamic
+# provisioning, this value must be 1
+#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_hosted_logging_hostname=logging.apps.example.com
+# Configure the prefix and version for the deployer image
+#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_logging_deployer_version=3.3.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -381,7 +472,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
-#osm_cluster_network_cidr=10.1.0.0/16
+#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -394,9 +485,16 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
-# Configure number of bits to allocate to each host’s subnet e.g. 8
-# would mean a /24 network on the host.
-#osm_host_subnet_length=8
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
+# Configure number of bits to allocate to each host’s subnet e.g. 9
+# would mean a /23 network on the host.
+#osm_host_subnet_length=9
# Configure master API and console ports.
#openshift_master_api_port=8443
@@ -470,7 +568,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1
-# Configure node kubelet arguments
+# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
@@ -490,7 +588,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# be used with 1.0 and 3.0.
#openshift_use_dnsmasq=False
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
-# This is useful for POC environments where DNS may not actually be available yet.
+# This is useful for POC environments where DNS may not actually be available yet, or for
+# setting options like 'strict-order' to alter the dnsmasq configuration.
#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
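+# Such a file could contain, for example (illustrative dnsmasq options):
+#   strict-order
+#   domain-needed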
# Global Proxy Configuration
@@ -517,11 +616,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
-
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
+# Admission plugin config
+#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
+
# Configure usage of openshift_clock role.
#openshift_clock_enabled=true
@@ -544,8 +645,10 @@ ose3-master[1:3]-ansible.test.example.com
[etcd]
ose3-etcd[1:3]-ansible.test.example.com
+# NOTE: Containerized load balancer hosts are not yet supported; if using a global
+# containerized=true host variable, it must be overridden with containerized=false here.
[lb]
-ose3-lb-ansible.test.example.com
+ose3-lb-ansible.test.example.com containerized=false
# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
# However, in order to ensure that your masters are not burdened with running pods you should
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index 99746cdbf..2be46a58c 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python2
+# pylint: skip-file
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
@@ -69,7 +70,8 @@ Examples:
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
-Version: 0.0.1
+Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
+Version: 0.0.3
'''
__requires__ = ['pycrypto>=2.6']
@@ -83,13 +85,19 @@ except ImportError:
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
-USER_AGENT_VERSION="v1"
+USER_AGENT_VERSION="v2"
import sys
import os
import argparse
+
+from time import time
+
import ConfigParser
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
try:
import json
except ImportError:
@@ -100,31 +108,103 @@ try:
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
- print("GCE inventory script requires libcloud >= 0.13")
- sys.exit(1)
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class CloudInventoryCache(object):
+ def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
+ cache_max_age=300):
+ cache_dir = os.path.expanduser(cache_path)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ self.cache_path_cache = os.path.join(cache_dir, cache_name)
+
+ self.cache_max_age = cache_max_age
+
+ def is_valid(self, max_age=None):
+        ''' Determines whether the cache file has expired or is still valid '''
+
+ if max_age is None:
+ max_age = self.cache_max_age
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + max_age) > current_time:
+ return True
+
+ return False
+
+ def get_all_data_from_cache(self, filename=''):
+ ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
+
+ data = ''
+ if not filename:
+ filename = self.cache_path_cache
+ with open(filename, 'r') as cache:
+ data = cache.read()
+ return json.loads(data)
+
+ def write_to_cache(self, data, filename=''):
+ ''' Writes data to file as JSON. Returns True. '''
+ if not filename:
+ filename = self.cache_path_cache
+ json_data = json.dumps(data)
+ with open(filename, 'w') as cache:
+ cache.write(json_data)
+ return True
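+
+# A minimal usage sketch for the cache above (fetch_from_api is a hypothetical stand-in;
+# GceInventory below wires this up via get_config(), load_inventory_from_cache() and
+# do_api_calls_update_cache()):
+#
+#   cache = CloudInventoryCache(cache_name='ansible-gce.cache',
+#                               cache_path='~/.ansible/tmp', cache_max_age=300)
+#   if cache.is_valid():
+#       inventory = cache.get_all_data_from_cache()
+#   else:
+#       inventory = fetch_from_api()   # roughly what group_instances() does
+#       cache.write_to_cache(inventory)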
class GceInventory(object):
def __init__(self):
+ # Cache object
+ self.cache = None
+ # dictionary containing inventory read from disk
+ self.inventory = {}
+
# Read settings and parse CLI arguments
self.parse_cli_args()
+ self.config = self.get_config()
self.driver = self.get_gce_driver()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Cache management
+ start_inventory_time = time()
+ cache_used = False
+ if self.args.refresh_cache or not self.cache.is_valid():
+ self.do_api_calls_update_cache()
+ else:
+ self.load_inventory_from_cache()
+ cache_used = True
+ self.inventory['_meta']['stats'] = {
+ 'inventory_load_time': time() - start_inventory_time,
+ 'cache_used': cache_used
+ }
# Just display data for specific host
if self.args.host:
- print(self.json_format_dict(self.node_to_dict(
- self.get_instance(self.args.host)),
- pretty=self.args.pretty))
- sys.exit(0)
-
- # Otherwise, assume user wants all instances grouped
- print(self.json_format_dict(self.group_instances(),
- pretty=self.args.pretty))
+ print(self.json_format_dict(
+ self.inventory['_meta']['hostvars'][self.args.host],
+ pretty=self.args.pretty))
+ else:
+ # Otherwise, assume user wants all instances grouped
+ zones = self.parse_env_zones()
+ print(self.json_format_dict(self.inventory,
+ pretty=self.args.pretty))
sys.exit(0)
- def get_gce_driver(self):
- """Determine the GCE authorization settings and return a
- libcloud driver.
+ def get_config(self):
+ """
+ Reads the settings from the gce.ini file.
+
+ Populates a SafeConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
@@ -139,14 +219,57 @@ class GceInventory(object):
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
+ 'inventory_ip_type': '',
+ 'cache_path': '~/.ansible/tmp',
+ 'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+ if 'cache' not in config.sections():
+ config.add_section('cache')
+
config.read(gce_ini_path)
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ # Caching
+ cache_path = config.get('cache', 'cache_path')
+ cache_max_age = config.getint('cache', 'cache_max_age')
+        # TODO(supertom): support project-specific caches
+ cache_name = 'ansible-gce.cache'
+ self.cache = CloudInventoryCache(cache_path=cache_path,
+ cache_max_age=cache_max_age,
+ cache_name=cache_name)
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
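+        # e.g. exporting INVENTORY_IP_TYPE=internal (illustrative) makes node_to_dict()
+        # below report each instance's private IP as ansible_ssh_host.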
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_driver(self):
+ """Determine the GCE authorization settings and return a
+ libcloud driver.
+ """
# Attempt to get GCE params from a configuration file, if one
# exists.
- secrets_path = config.get('gce', 'libcloud_secrets')
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
@@ -160,8 +283,7 @@ class GceInventory(object):
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
- print(err)
- sys.exit(1)
+ sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
@@ -172,10 +294,10 @@ class GceInventory(object):
pass
if not secrets_found:
args = [
- config.get('gce','gce_service_account_email_address'),
- config.get('gce','gce_service_account_pem_file_path')
+ self.config.get('gce','gce_service_account_email_address'),
+ self.config.get('gce','gce_service_account_pem_file_path')
]
- kwargs = {'project': config.get('gce', 'gce_project_id')}
+ kwargs = {'project': self.config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
@@ -190,6 +312,14 @@ class GceInventory(object):
)
return gce
+ def parse_env_zones(self):
+        '''Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
+        If provided, this is used to filter the results of the group_instances call.'''
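+        # e.g. GCE_ZONE="us-central1-a,europe-west1-b" (illustrative values) would limit
+        # zone-based grouping to those two zones.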
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
def parse_cli_args(self):
''' Command line argument processing '''
@@ -201,6 +331,9 @@ class GceInventory(object):
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
+ parser.add_argument(
+ '--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests (default: False - use cache files)')
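+        # Example invocations (illustrative): "./gce.py --list --pretty",
+        # "./gce.py --host my_instance", or "./gce.py --refresh-cache".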
self.args = parser.parse_args()
@@ -210,11 +343,17 @@ class GceInventory(object):
if inst is None:
return {}
- if inst.extra['metadata'].has_key('items'):
+ if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+        # default to external IP unless the user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
@@ -230,29 +369,67 @@ class GceInventory(object):
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+ 'ansible_ssh_host': ssh_host
}
- def get_instance(self, instance_name):
- '''Gets details about a specific instance '''
+ def load_inventory_from_cache(self):
+ ''' Loads inventory from JSON on disk. '''
+
try:
- return self.driver.ex_get_node(instance_name)
+ self.inventory = self.cache.get_all_data_from_cache()
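+            # touch the hostvars key so a stale or malformed cache raises here and is
+            # reported below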
+ hosts = self.inventory['_meta']['hostvars']
except Exception as e:
- return None
-
- def group_instances(self):
+ print(
+ "Invalid inventory file %s. Please rebuild with -refresh-cache option."
+ % (self.cache.cache_path_cache))
+ raise
+
+ def do_api_calls_update_cache(self):
+ ''' Do API calls and save data in cache. '''
+ zones = self.parse_env_zones()
+ data = self.group_instances(zones)
+ self.cache.write_to_cache(data)
+ self.inventory = data
+
+ def list_nodes(self):
+ all_nodes = []
+ params, more_results = {'maxResults': 500}, True
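+        # libcloud's GCE connection is expected to write the next pageToken back into
+        # gce_params after each list_nodes() call, so loop until no pageToken remains.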
+ while more_results:
+ self.driver.connection.gce_params=params
+ all_nodes.extend(self.driver.list_nodes())
+ more_results = 'pageToken' in params
+ return all_nodes
+
+ def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
- for node in self.driver.list_nodes():
+ for node in self.list_nodes():
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
- if groups.has_key(zone): groups[zone].append(name)
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if zone in groups: groups[zone].append(name)
else: groups[zone] = [name]
tags = node.extra['tags']
@@ -261,25 +438,25 @@ class GceInventory(object):
tag = t[6:]
else:
tag = 'tag_%s' % t
- if groups.has_key(tag): groups[tag].append(name)
+ if tag in groups: groups[tag].append(name)
else: groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
- if groups.has_key(net): groups[net].append(name)
+ if net in groups: groups[net].append(name)
else: groups[net] = [name]
machine_type = node.size
- if groups.has_key(machine_type): groups[machine_type].append(name)
+ if machine_type in groups: groups[machine_type].append(name)
else: groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
- if groups.has_key(image): groups[image].append(name)
+ if image in groups: groups[image].append(name)
else: groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
- if groups.has_key(stat): groups[stat].append(name)
+ if stat in groups: groups[stat].append(name)
else: groups[stat] = [name]
groups["_meta"] = meta
@@ -295,6 +472,6 @@ class GceInventory(object):
else:
return json.dumps(data)
-
# Run the script
-GceInventory()
+if __name__ == '__main__':
+ GceInventory()
diff --git a/inventory/hosts b/inventory/hosts
deleted file mode 100644
index 72b7ae646..000000000
--- a/inventory/hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
-localhost
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
index 1c9c17308..d63e07b64 100755
--- a/inventory/libvirt/hosts/libvirt_generic.py
+++ b/inventory/libvirt/hosts/libvirt_generic.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python2
+# pylint: skip-file
'''
libvirt external inventory script
@@ -60,11 +61,11 @@ class LibvirtInventory(object):
self.parse_cli_args()
if self.args.host:
- print _json_format_dict(self.get_host_info(), self.args.pretty)
+ print(_json_format_dict(self.get_host_info(), self.args.pretty))
elif self.args.list:
- print _json_format_dict(self.get_inventory(), self.args.pretty)
+ print(_json_format_dict(self.get_inventory(), self.args.pretty))
else: # default action with no options
- print _json_format_dict(self.get_inventory(), self.args.pretty)
+ print(_json_format_dict(self.get_inventory(), self.args.pretty))
def read_settings(self):
''' Reads the settings from the libvirt.ini file '''
@@ -114,12 +115,12 @@ class LibvirtInventory(object):
conn = libvirt.openReadOnly(self.libvirt_uri)
if conn is None:
- print "Failed to open connection to %s" % self.libvirt_uri
+ print("Failed to open connection to %s" % self.libvirt_uri)
sys.exit(1)
domains = conn.listAllDomains()
if domains is None:
- print "Failed to list domains for connection %s" % self.libvirt_uri
+ print("Failed to list domains for connection %s" % self.libvirt_uri)
sys.exit(1)
for domain in domains:
diff --git a/inventory/openstack/hosts/openstack.py b/inventory/openstack/hosts/openstack.py
index 0d92eae11..deefd3b5d 100755
--- a/inventory/openstack/hosts/openstack.py
+++ b/inventory/openstack/hosts/openstack.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# pylint: skip-file
# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>