Diffstat (limited to 'inventory')
-rw-r--r--  inventory/README.md                            9
-rw-r--r--  inventory/aws/hosts/ec2.ini                   97
-rwxr-xr-x  inventory/aws/hosts/ec2.py                   645
-rw-r--r--  inventory/aws/hosts/hosts                      2
-rw-r--r--  inventory/byo/.gitignore                       1
-rw-r--r--  inventory/byo/hosts                           43
-rw-r--r--  inventory/byo/hosts.openstack                 37
-rw-r--r--  inventory/byo/hosts.origin.example           658
-rw-r--r--  inventory/byo/hosts.ose.example              654
-rwxr-xr-x  inventory/gce/hosts/gce.py                    27
-rw-r--r--  inventory/gce/hosts/hosts                      2
-rw-r--r--  inventory/hosts                                2
-rw-r--r--  inventory/libvirt/hosts/hosts                  2
-rwxr-xr-x  inventory/multi_ec2.py                       363
-rw-r--r--  inventory/multi_ec2.yaml.example              32
-rw-r--r--  inventory/openshift-ansible-inventory.spec    82
-rw-r--r--  inventory/openstack/hosts/hosts                2
-rw-r--r--  inventory/openstack/hosts/nova.ini            45
-rwxr-xr-x  inventory/openstack/hosts/nova.py            224
-rwxr-xr-x  inventory/openstack/hosts/openstack.py       246
20 files changed, 2309 insertions, 864 deletions
diff --git a/inventory/README.md b/inventory/README.md
new file mode 100644
index 000000000..b8edfcbb0
--- /dev/null
+++ b/inventory/README.md
@@ -0,0 +1,9 @@
+# OpenShift Ansible inventory config files
+
+You can install OpenShift on:
+
+* [Amazon Web Services](aws/hosts/)
+* [BYO](byo/) (Bring your own): use this inventory config file to install OpenShift on your own bare metal servers
+* [GCE](gce/) (Google Compute Engine)
+* [libvirt](libvirt/hosts/)
+* [OpenStack](openstack/hosts/)
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
index eaab0a410..5ee51c84f 100644
--- a/inventory/aws/hosts/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
@@ -24,24 +24,61 @@ regions_exclude = us-gov-west-1,cn-north-1
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
-# address, and should set this to 'private_dns_name'.
+# address, and should set this to 'private_dns_name'. The key of an EC2 tag
+# may optionally be used; however the boto instance variables hold precedence
+# in the event of a collision.
destination_variable = public_dns_name
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
-# be run from with EC2.
+# be run from within EC2. The key of an EC2 tag may optionally be used; however
+# the boto instance variables hold precedence in the event of a collision.
+# WARNING: instances in a private VPC _without_ a public IP address will not
+# be listed in the inventory until you set:
+# vpc_destination_variable = 'private_ip_address'
vpc_destination_variable = ip_address
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
+# To exclude RDS instances from the inventory, uncomment and set to False.
+rds = False
+
+# To exclude ElastiCache instances from the inventory, uncomment and set to False.
+elasticache = False
+
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
+# By default, only EC2 instances in the 'running' state are returned. Set
+# 'all_instances' to True to return all instances regardless of state.
+all_instances = False
+
+# By default, only EC2 instances in the 'running' state are returned. Specify
+# EC2 instance states to return as a comma-separated list. This
+# option is overridden when 'all_instances' is True.
+# instance_states = pending, running, shutting-down, terminated, stopping, stopped
+
+# By default, only RDS instances in the 'available' state are returned. Set
+# 'all_rds_instances' to True to return all RDS instances regardless of state.
+all_rds_instances = False
+
+# By default, only ElastiCache clusters and nodes in the 'available' state
+# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
+# to True to return all ElastiCache clusters and nodes, regardless of state.
+#
+# Note that all_elasticache_nodes only applies to listed clusters. That means
+# if you set all_elasticache_clusters to False, no nodes will be returned from
+# unavailable clusters, regardless of their state and of what you set for
+# all_elasticache_nodes.
+all_elasticache_replication_groups = False
+all_elasticache_clusters = False
+all_elasticache_nodes = False
+
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
@@ -60,3 +97,59 @@ cache_max_age = 300
# destination_variable and vpc_destination_variable.
# destination_format = {0}.{1}.rhcloud.com
# destination_format_tags = Name,environment
+
+# Organize groups into a nested/hierarchy instead of a flat namespace.
+nested_groups = False
+
+# Replace dashes (-) with underscores when creating group names to avoid issues with Ansible
+replace_dash_in_groups = False
+
+# The EC2 inventory output can become very large. To manage its size,
+# configure which groups should be created.
+group_by_instance_id = True
+group_by_region = True
+group_by_availability_zone = True
+group_by_ami_id = True
+group_by_instance_type = True
+group_by_key_pair = True
+group_by_vpc_id = True
+group_by_security_group = True
+group_by_tag_keys = True
+group_by_tag_none = True
+group_by_route53_names = True
+group_by_rds_engine = True
+group_by_rds_parameter_group = True
+group_by_elasticache_engine = True
+group_by_elasticache_cluster = True
+group_by_elasticache_parameter_group = True
+group_by_elasticache_replication_group = True
+
+# If you only want to include hosts that match a certain regular expression
+# pattern_include = staging-*
+
+# If you want to exclude any hosts that match a certain regular expression
+# pattern_exclude = staging-*
+
+# Instance filters can be used to control which instances are retrieved for
+# inventory. For the full list of possible filters, please read the EC2 API
+# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
+# Filters are key/value pairs separated by '='; to list multiple filters,
+# separate them with commas. See examples below.
+
+# Retrieve only instances with (key=value) env=staging tag
+# instance_filters = tag:env=staging
+
+# Retrieve only instances with role=webservers OR role=dbservers tag
+# instance_filters = tag:role=webservers,tag:role=dbservers
+
+# Retrieve only t1.micro instances OR instances with tag env=staging
+# instance_filters = instance-type=t1.micro,tag:env=staging
+
+# You can also use wildcards in filter values. The filter below lists instances
+# whose Name tag matches webservers1*
+# (e.g. webservers15, webservers1a, webservers123, etc.)
+# instance_filters = tag:Name=webservers1*
+
+# A boto configuration profile may be used to separate out credentials
+# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
+# boto_profile = some-boto-profile-name
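
Editor's note (not part of the diff): the filter examples above map directly onto the EC2 DescribeInstances API. As a rough, hypothetical sketch of how a value such as 'tag:role=webservers,tag:role=dbservers' could be parsed into the filters mapping that boto's get_all_instances() expects (the real ec2.py may differ in detail):

# Hypothetical sketch: parse an ec2.ini 'instance_filters' string into a
# {filter_key: [values]} mapping usable with get_all_instances(filters=...).
from collections import defaultdict

def parse_instance_filters(raw):
    filters = defaultdict(list)
    for item in raw.split(','):
        item = item.strip()
        if not item or '=' not in item:
            continue  # skip malformed entries
        key, value = [part.strip() for part in item.split('=', 1)]
        if key:
            filters[key].append(value)
    return dict(filters)

print(parse_instance_filters('tag:role=webservers,tag:role=dbservers'))
# {'tag:role': ['webservers', 'dbservers']}

Each resulting key/value pair is then applied as an EC2 API filter, which matches the "role=webservers OR role=dbservers" behavior described in the comments above.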
diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py
index f231ff4c2..8b878cafd 100755
--- a/inventory/aws/hosts/ec2.py
+++ b/inventory/aws/hosts/ec2.py
@@ -22,6 +22,12 @@ you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
+If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
+using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
+the AWS_PROFILE variable:
+
+ AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
+
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
@@ -121,8 +127,11 @@ from time import time
import boto
from boto import ec2
from boto import rds
+from boto import elasticache
from boto import route53
-import ConfigParser
+import six
+
+from six.moves import configparser
from collections import defaultdict
try:
@@ -145,9 +154,18 @@ class Ec2Inventory(object):
# Index of hostname (address) to instance ID
self.index = {}
+ # Boto profile to use (if any)
+ self.boto_profile = None
+
# Read settings and parse CLI arguments
- self.read_settings()
self.parse_cli_args()
+ self.read_settings()
+
+ # Make sure that profile_name is not passed at all if not set
+ # as pre 2.24 boto will fall over otherwise
+ if self.boto_profile:
+ if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
+ self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
@@ -166,7 +184,7 @@ class Ec2Inventory(object):
else:
data_to_print = self.json_format_dict(self.inventory, True)
- print data_to_print
+ print(data_to_print)
def is_cache_valid(self):
@@ -184,10 +202,12 @@ class Ec2Inventory(object):
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
-
- config = ConfigParser.SafeConfigParser()
+ if six.PY3:
+ config = configparser.ConfigParser()
+ else:
+ config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
- ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
+ ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
@@ -236,18 +256,72 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
- # Return all EC2 and RDS instances (if RDS is enabled)
+ # Include ElastiCache instances?
+ self.elasticache_enabled = True
+ if config.has_option('ec2', 'elasticache'):
+ self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
+
+ # Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
+
+ # Instance states to be gathered in inventory. Default is 'running'.
+ # Setting 'all_instances' to 'yes' overrides this option.
+ ec2_valid_instance_states = [
+ 'pending',
+ 'running',
+ 'shutting-down',
+ 'terminated',
+ 'stopping',
+ 'stopped'
+ ]
+ self.ec2_instance_states = []
+ if self.all_instances:
+ self.ec2_instance_states = ec2_valid_instance_states
+ elif config.has_option('ec2', 'instance_states'):
+ for instance_state in config.get('ec2', 'instance_states').split(','):
+ instance_state = instance_state.strip()
+ if instance_state not in ec2_valid_instance_states:
+ continue
+ self.ec2_instance_states.append(instance_state)
+ else:
+ self.ec2_instance_states = ['running']
+
+ # Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
+ # Return all ElastiCache replication groups? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
+ self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
+ else:
+ self.all_elasticache_replication_groups = False
+
+ # Return all ElastiCache clusters? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
+ self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
+ else:
+ self.all_elasticache_clusters = False
+
+ # Return all ElastiCache nodes? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
+ self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
+ else:
+ self.all_elasticache_nodes = False
+
+ # boto configuration profile (prefer CLI argument)
+ self.boto_profile = self.args.boto_profile
+ if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
+ self.boto_profile = config.get('ec2', 'boto_profile')
+
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
+ if self.boto_profile:
+ cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
@@ -261,6 +335,12 @@ class Ec2Inventory(object):
else:
self.nested_groups = False
+ # Replace dash or not in group names
+ if config.has_option('ec2', 'replace_dash_in_groups'):
+ self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
+ else:
+ self.replace_dash_in_groups = True
+
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
@@ -276,6 +356,10 @@ class Ec2Inventory(object):
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
+ 'group_by_elasticache_engine',
+ 'group_by_elasticache_cluster',
+ 'group_by_elasticache_parameter_group',
+ 'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
@@ -290,7 +374,7 @@ class Ec2Inventory(object):
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
- except ConfigParser.NoOptionError, e:
+ except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
@@ -300,7 +384,7 @@ class Ec2Inventory(object):
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
- except ConfigParser.NoOptionError, e:
+ except configparser.NoOptionError:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
@@ -325,6 +409,8 @@ class Ec2Inventory(object):
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
+ parser.add_argument('--boto-profile', action='store',
+ help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
@@ -338,30 +424,52 @@ class Ec2Inventory(object):
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
+ if self.elasticache_enabled:
+ self.get_elasticache_clusters_by_region(region)
+ self.get_elasticache_replication_groups_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
+ def connect(self, region):
+ ''' create connection to api server'''
+ if self.eucalyptus:
+ conn = boto.connect_euca(host=self.eucalyptus_host)
+ conn.APIVersion = '2010-08-31'
+ else:
+ conn = self.connect_to_aws(ec2, region)
+ return conn
+
+ def boto_fix_security_token_in_profile(self, connect_args):
+ ''' monkey patch for boto issue boto/boto#2100 '''
+ profile = 'profile ' + self.boto_profile
+ if boto.config.has_option(profile, 'aws_security_token'):
+ connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
+ return connect_args
+
+ def connect_to_aws(self, module, region):
+ connect_args = {}
+
+ # only pass the profile name if it's set (as it is not supported by older boto versions)
+ if self.boto_profile:
+ connect_args['profile_name'] = self.boto_profile
+ self.boto_fix_security_token_in_profile(connect_args)
+
+ conn = module.connect_to_region(region, **connect_args)
+ # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+ if conn is None:
+ self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
+ return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
- if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host)
- conn.APIVersion = '2010-08-31'
- else:
- conn = ec2.connect_to_region(region)
-
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- sys.exit(1)
-
+ conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
- for filter_key, filter_values in self.ec2_instance_filters.iteritems():
+ for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
@@ -370,40 +478,130 @@ class Ec2Inventory(object):
for instance in reservation.instances:
self.add_instance(instance, region)
- except boto.exception.BotoServerError, e:
- if not self.eucalyptus:
- print "Looks like AWS is down again:"
- print e
- sys.exit(1)
+ except boto.exception.BotoServerError as e:
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
+ else:
+ backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
+ error = "Error connecting to %s backend.\n%s" % (backend, e.message)
+ self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
- conn = rds.connect_to_region(region)
+ conn = self.connect_to_aws(rds, region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
+ error = e.reason
+
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
if not e.reason == "Forbidden":
- print "Looks like AWS RDS is down: "
- print e
- sys.exit(1)
+ error = "Looks like AWS RDS is down:\n%s" % e.message
+ self.fail_with_error(error, 'getting RDS instances')
- def get_instance(self, region, instance_id):
- ''' Gets details about a specific instance '''
- if self.eucalyptus:
- conn = boto.connect_euca(self.eucalyptus_host)
- conn.APIVersion = '2010-08-31'
+ def get_elasticache_clusters_by_region(self, region):
+ ''' Makes an AWS API call to the list of ElastiCache clusters (with
+ nodes' info) in a particular region.'''
+
+        # ElastiCache boto module doesn't provide a get_all_instances method,
+ # that's why we need to call describe directly (it would be called by
+ # the shorthand method anyway...)
+ try:
+ conn = elasticache.connect_to_region(region)
+ if conn:
+ # show_cache_node_info = True
+ # because we also want nodes' information
+ response = conn.describe_cache_clusters(None, None, None, True)
+
+ except boto.exception.BotoServerError as e:
+ error = e.reason
+
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
+ if not e.reason == "Forbidden":
+ error = "Looks like AWS ElastiCache is down:\n%s" % e.message
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ try:
+ # Boto also doesn't provide wrapper classes to CacheClusters or
+            # CacheNodes. Because of that we can't make use of the get_list
+ # method in the AWSQueryConnection. Let's do the work manually
+ clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
+
+ except KeyError as e:
+ error = "ElastiCache query to AWS failed (unexpected format)."
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ for cluster in clusters:
+ self.add_elasticache_cluster(cluster, region)
+
+ def get_elasticache_replication_groups_by_region(self, region):
+ ''' Makes an AWS API call to the list of ElastiCache replication groups
+ in a particular region.'''
+
+        # ElastiCache boto module doesn't provide a get_all_instances method,
+ # that's why we need to call describe directly (it would be called by
+ # the shorthand method anyway...)
+ try:
+ conn = elasticache.connect_to_region(region)
+ if conn:
+ response = conn.describe_replication_groups()
+
+ except boto.exception.BotoServerError as e:
+ error = e.reason
+
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
+ if not e.reason == "Forbidden":
+ error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ try:
+            # Boto also doesn't provide wrapper classes to ReplicationGroups.
+            # Because of that we can't make use of the get_list method in the
+ # AWSQueryConnection. Let's do the work manually
+ replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
+
+ except KeyError as e:
+ error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ for replication_group in replication_groups:
+ self.add_elasticache_replication_group(replication_group, region)
+
+ def get_auth_error_message(self):
+ ''' create an informative error message if there is an issue authenticating'''
+ errors = ["Authentication error retrieving ec2 inventory."]
+ if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
+ errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
- conn = ec2.connect_to_region(region)
+ errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- sys.exit(1)
+ boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
+ boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
+ if len(boto_config_found) > 0:
+ errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
+ else:
+ errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
+
+ return '\n'.join(errors)
+
+ def fail_with_error(self, err_msg, err_operation=None):
+ '''log an error to std err for ansible-playbook to consume and exit'''
+ if err_operation:
+ err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
+ err_msg=err_msg, err_operation=err_operation)
+ sys.stderr.write(err_msg)
+ sys.exit(1)
+
+ def get_instance(self, region, instance_id):
+ conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
@@ -414,8 +612,8 @@ class Ec2Inventory(object):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
- # Only want running instances unless all_instances is True
- if not self.all_instances and instance.state != 'running':
+ # Only return instances with desired instance states
+ if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
@@ -502,18 +700,21 @@ class Ec2Inventory(object):
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
- sys.exit(1)
+ self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+ 'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
- for k, v in instance.tags.iteritems():
- key = self.to_safe("tag_" + k + "=" + v)
+ for k, v in instance.tags.items():
+ if v:
+ key = self.to_safe("tag_" + k + "=" + v)
+ else:
+ key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
+ if v:
+ self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
@@ -597,9 +798,9 @@ class Ec2Inventory(object):
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
- sys.exit(1)
+ self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+ 'Please upgrade boto >= 2.3.0.']))
+
# Inventory: Group by engine
if self.group_by_rds_engine:
@@ -618,6 +819,243 @@ class Ec2Inventory(object):
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ def add_elasticache_cluster(self, cluster, region):
+ ''' Adds an ElastiCache cluster to the inventory and index, as long as
+            its nodes are addressable '''
+
+ # Only want available clusters unless all_elasticache_clusters is True
+ if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
+ return
+
+ # Select the best destination address
+ if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
+ # Memcached cluster
+ dest = cluster['ConfigurationEndpoint']['Address']
+ is_redis = False
+ else:
+            # Redis single node cluster
+ # Because all Redis clusters are single nodes, we'll merge the
+ # info from the cluster with info about the node
+ dest = cluster['CacheNodes'][0]['Endpoint']['Address']
+ is_redis = True
+
+ if not dest:
+ # Skip clusters we cannot address (e.g. private VPC subnet)
+ return
+
+ # Add to index
+ self.index[dest] = [region, cluster['CacheClusterId']]
+
+ # Inventory: Group by instance ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[cluster['CacheClusterId']] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
+
+ # Inventory: Group by region
+ if self.group_by_region and not is_redis:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone
+ if self.group_by_availability_zone and not is_redis:
+ self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+ if self.nested_groups:
+ if self.group_by_region:
+ self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+ self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+ # Inventory: Group by node type
+ if self.group_by_instance_type and not is_redis:
+ type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by VPC (information not available in the current
+ # AWS API version for ElastiCache)
+
+ # Inventory: Group by security group
+ if self.group_by_security_group and not is_redis:
+
+ # Check for the existence of the 'SecurityGroups' key and also if
+ # this key has some value. When the cluster is not placed in a SG
+ # the query can return None here and cause an error.
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
+ for security_group in cluster['SecurityGroups']:
+ key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+
+ # Inventory: Group by engine
+ if self.group_by_elasticache_engine and not is_redis:
+ self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
+
+ # Inventory: Group by parameter group
+ if self.group_by_elasticache_parameter_group:
+ self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
+
+ # Inventory: Group by replication group
+ if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
+ self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
+
+ # Global Tag: all ElastiCache clusters
+ self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
+
+ host_info = self.get_host_info_dict_from_describe_dict(cluster)
+
+ self.inventory["_meta"]["hostvars"][dest] = host_info
+
+ # Add the nodes
+ for node in cluster['CacheNodes']:
+ self.add_elasticache_node(node, cluster, region)
+
+ def add_elasticache_node(self, node, cluster, region):
+ ''' Adds an ElastiCache node to the inventory and index, as long as
+ it is addressable '''
+
+ # Only want available nodes unless all_elasticache_nodes is True
+ if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
+ return
+
+ # Select the best destination address
+ dest = node['Endpoint']['Address']
+
+ if not dest:
+ # Skip nodes we cannot address (e.g. private VPC subnet)
+ return
+
+ node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
+
+ # Add to index
+ self.index[dest] = [region, node_id]
+
+ # Inventory: Group by node ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[node_id] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', node_id)
+
+ # Inventory: Group by region
+ if self.group_by_region:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone
+ if self.group_by_availability_zone:
+ self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+ if self.nested_groups:
+ if self.group_by_region:
+ self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+ self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+ # Inventory: Group by node type
+ if self.group_by_instance_type:
+ type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by VPC (information not available in the current
+ # AWS API version for ElastiCache)
+
+ # Inventory: Group by security group
+ if self.group_by_security_group:
+
+ # Check for the existence of the 'SecurityGroups' key and also if
+ # this key has some value. When the cluster is not placed in a SG
+ # the query can return None here and cause an error.
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
+ for security_group in cluster['SecurityGroups']:
+ key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+
+ # Inventory: Group by engine
+ if self.group_by_elasticache_engine:
+ self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
+
+ # Inventory: Group by parameter group (done at cluster level)
+
+ # Inventory: Group by replication group (done at cluster level)
+
+ # Inventory: Group by ElastiCache Cluster
+ if self.group_by_elasticache_cluster:
+ self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
+
+ # Global Tag: all ElastiCache nodes
+ self.push(self.inventory, 'elasticache_nodes', dest)
+
+ host_info = self.get_host_info_dict_from_describe_dict(node)
+
+ if dest in self.inventory["_meta"]["hostvars"]:
+ self.inventory["_meta"]["hostvars"][dest].update(host_info)
+ else:
+ self.inventory["_meta"]["hostvars"][dest] = host_info
+
+ def add_elasticache_replication_group(self, replication_group, region):
+ ''' Adds an ElastiCache replication group to the inventory and index '''
+
+ # Only want available clusters unless all_elasticache_replication_groups is True
+ if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
+ return
+
+ # Select the best destination address (PrimaryEndpoint)
+ dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
+
+ if not dest:
+ # Skip clusters we cannot address (e.g. private VPC subnet)
+ return
+
+ # Add to index
+ self.index[dest] = [region, replication_group['ReplicationGroupId']]
+
+ # Inventory: Group by ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[replication_group['ReplicationGroupId']] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
+
+ # Inventory: Group by region
+ if self.group_by_region:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone (doesn't apply to replication groups)
+
+ # Inventory: Group by node type (doesn't apply to replication groups)
+
+ # Inventory: Group by VPC (information not available in the current
+        # AWS API version for replication groups)
+
+ # Inventory: Group by security group (doesn't apply to replication groups)
+ # Check this value in cluster level
+
+ # Inventory: Group by engine (replication groups are always Redis)
+ if self.group_by_elasticache_engine:
+ self.push(self.inventory, 'elasticache_redis', dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', 'redis')
+
+        # Global Tag: all ElastiCache replication groups
+ self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
+
+ host_info = self.get_host_info_dict_from_describe_dict(replication_group)
+
+ self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
@@ -666,7 +1104,6 @@ class Ec2Inventory(object):
return list(name_list)
-
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
@@ -683,7 +1120,7 @@ class Ec2Inventory(object):
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
- elif type(value) in [str, unicode]:
+ elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
@@ -692,7 +1129,7 @@ class Ec2Inventory(object):
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
- for k, v in value.iteritems():
+ for k, v in value.items():
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
@@ -712,6 +1149,91 @@ class Ec2Inventory(object):
return instance_vars
+ def get_host_info_dict_from_describe_dict(self, describe_dict):
+ ''' Parses the dictionary returned by the API call into a flat list
+ of parameters. This method should be used only when 'describe' is
+ used directly because Boto doesn't provide specific classes. '''
+
+ # I really don't agree with prefixing everything with 'ec2'
+ # because EC2, RDS and ElastiCache are different services.
+        # I'm just following the pattern used so far, to avoid breaking
+        # compatibility.
+
+ host_info = {}
+ for key in describe_dict:
+ value = describe_dict[key]
+ key = self.to_safe('ec2_' + self.uncammelize(key))
+
+ # Handle complex types
+
+ # Target: Memcached Cache Clusters
+ if key == 'ec2_configuration_endpoint' and value:
+ host_info['ec2_configuration_endpoint_address'] = value['Address']
+ host_info['ec2_configuration_endpoint_port'] = value['Port']
+
+ # Target: Cache Nodes and Redis Cache Clusters (single node)
+ if key == 'ec2_endpoint' and value:
+ host_info['ec2_endpoint_address'] = value['Address']
+ host_info['ec2_endpoint_port'] = value['Port']
+
+ # Target: Redis Replication Groups
+ if key == 'ec2_node_groups' and value:
+ host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
+ host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
+ replica_count = 0
+ for node in value[0]['NodeGroupMembers']:
+ if node['CurrentRole'] == 'primary':
+ host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
+ host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
+ host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
+ elif node['CurrentRole'] == 'replica':
+ host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
+ host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
+ host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
+ replica_count += 1
+
+ # Target: Redis Replication Groups
+ if key == 'ec2_member_clusters' and value:
+ host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
+
+ # Target: All Cache Clusters
+ elif key == 'ec2_cache_parameter_group':
+ host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
+ host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
+ host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
+
+ # Target: Almost everything
+ elif key == 'ec2_security_groups':
+
+ # Skip if SecurityGroups is None
+ # (it is possible to have the key defined but no value in it).
+ if value is not None:
+ sg_ids = []
+ for sg in value:
+ sg_ids.append(sg['SecurityGroupId'])
+ host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
+
+ # Target: Everything
+ # Preserve booleans and integers
+ elif type(value) in [int, bool]:
+ host_info[key] = value
+
+ # Target: Everything
+ # Sanitize string values
+ elif isinstance(value, six.string_types):
+ host_info[key] = value.strip()
+
+ # Target: Everything
+ # Replace None by an empty string
+ elif type(value) == type(None):
+ host_info[key] = ''
+
+ else:
+ # Remove non-processed complex types
+ pass
+
+ return host_info
+
def get_host_info(self):
''' Get variables about a specific host '''
@@ -775,13 +1297,16 @@ class Ec2Inventory(object):
cache.write(json_data)
cache.close()
+ def uncammelize(self, key):
+ temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
+ return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be
- used as Ansible groups '''
-
- return re.sub("[^A-Za-z0-9\-]", "_", word)
-
+ ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
+ regex = "[^A-Za-z0-9\_"
+ if not self.replace_dash_in_groups:
+ regex += "\-"
+ return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
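
Editor's note (not part of the diff): the uncammelize()/to_safe() helpers added at the end of ec2.py are easiest to follow with a concrete input and output. The snippet below is a minimal, self-contained illustration that mirrors the regexes in the hunk above, with the replace_dash_in_groups switch assumed to behave as shown there:

# Illustration only: mimic the new helpers to show how ElastiCache keys and
# group names are normalized.
import re

def uncammelize(key):
    # CamelCase -> snake_case, e.g. 'CacheClusterId' -> 'cache_cluster_id'
    temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

def to_safe(word, replace_dash_in_groups=True):
    # Replace characters that are unsafe in Ansible group names with '_'.
    regex = r"[^A-Za-z0-9_"
    if not replace_dash_in_groups:
        regex += r"\-"
    return re.sub(regex + "]", "_", word)

print(uncammelize('CacheClusterId'))     # cache_cluster_id
print(to_safe('tag_Name=web-1'))         # tag_Name_web_1 (dash replaced)
print(to_safe('tag_Name=web-1', False))  # tag_Name_web-1 (dash kept)

This is also how get_host_info_dict_from_describe_dict() builds its keys, via to_safe('ec2_' + uncammelize(key)), so a describe key like 'CacheClusterId' becomes the host variable 'ec2_cache_cluster_id'.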
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
index 34a4396bd..3996e577e 100644
--- a/inventory/aws/hosts/hosts
+++ b/inventory/aws/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/byo/.gitignore b/inventory/byo/.gitignore
new file mode 100644
index 000000000..6ff331c7e
--- /dev/null
+++ b/inventory/byo/.gitignore
@@ -0,0 +1 @@
+hosts
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
deleted file mode 100644
index a9add6a60..000000000
--- a/inventory/byo/hosts
+++ /dev/null
@@ -1,43 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# SSH user, this user should allow ssh based auth without requiring a password
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_sudo must be set to true
-#ansible_sudo=true
-
-# To deploy origin, change deployment_type to origin
-deployment_type=enterprise
-
-# Pre-release registry URL
-oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
-
-# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-
-# Origin copr repo
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-
-# htpasswd auth
-#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
-
-# host group for masters
-[masters]
-ose3-master-ansible.test.example.com
-
-[etcd]
-#ose3-master-ansible.test.example.com
-
-# host group for nodes
-[nodes]
-ose3-master-ansible.test.example.com openshift_scheduleable=False
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.openstack b/inventory/byo/hosts.openstack
new file mode 100644
index 000000000..ea7e905cb
--- /dev/null
+++ b/inventory/byo/hosts.openstack
@@ -0,0 +1,37 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+ansible_ssh_user=cloud-user
+ansible_become=yes
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+deployment_type=openshift-enterprise
+
+openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}]
+
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift.common.config_base }}/htpasswd'}]
+
+#openshift_pkg_version=-3.0.0.0
+
+[masters]
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
+
+[etcd]
+jdetiber-etcd.usersys.redhat.com
+
+[lb]
+#ose3-lb-ansible.test.example.com
+
+[nodes]
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
+jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
new file mode 100644
index 000000000..5a95ecf94
--- /dev/null
+++ b/inventory/byo/hosts.origin.example
@@ -0,0 +1,658 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+nfs
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_become=yes
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+# deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
+deployment_type=origin
+
+# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
+# rely on the version running on the first master. Works best for containerized installs where we can usually
+# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
+# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
+# release.
+openshift_release=v1.4
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v1.2.0
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-1.2.0
+
+# Install the openshift examples
+#openshift_install_examples=true
+
+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
+
+# Configure extensionStylesheets in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_oauth_template=/path/to/login-template.html
+
+# Configure imagePolicyConfig in the master config
+# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
+#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+
+# Docker Configuration
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#openshift_docker_additional_registries=registry.example.com
+#openshift_docker_insecure_registries=registry.example.com
+#openshift_docker_blocked_registries=registry.hacker.com
+# Disable pushing to dockerhub
+#openshift_docker_disable_push_dockerhub=True
+# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=json-file --log-opt max-size=50m"
+#openshift_docker_options="-l warn --ipv6=false"
+
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.10.3"
+
+# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
+# docker_upgrade=False
+
+# Alternate image format string, useful if you've got your own registry mirror
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+# If oreg_url points to a registry other than registry.access.redhat.com we can
+# modify image streams to point at that registry by setting the following to true
+#openshift_examples_modify_imagestreams=true
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+#
+# Configure LDAP CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the LDAPPasswordIdentityProvider.
+#
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# OpenID auth
+#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
+#
+# Configure OpenID CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the OpenIDIdentityProvider.
+#
+#openshift_master_openid_ca=<ca text>
+# or
+#openshift_master_openid_ca_file=<path to local ca file to use>
+
+# Request header auth
+#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
+#
+# Configure request header CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "clientCA"
+# key set within the RequestHeaderIdentityProvider.
+#
+#openshift_master_request_header_ca=<ca text>
+# or
+#openshift_master_request_header_ca_file=<path to local ca file to use>
+
+# Cloud Provider Configuration
+#
+# Note: You may make use of environment variables rather than store
+# sensitive configuration within the ansible inventory.
+# For example:
+#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
+#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
+#
+# AWS
+#openshift_cloudprovider_kind=aws
+# Note: IAM profiles may be used instead of storing API credentials on disk.
+#openshift_cloudprovider_aws_access_key=aws_access_key_id
+#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
+#
+# Openstack
+#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
+#openshift_cloudprovider_openstack_username=username
+#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_domain_id=domain_id
+#openshift_cloudprovider_openstack_domain_name=domain_name
+#openshift_cloudprovider_openstack_tenant_id=tenant_id
+#openshift_cloudprovider_openstack_tenant_name=tenant_name
+#openshift_cloudprovider_openstack_region=region
+#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
+#
+# GCE
+#openshift_cloudprovider_kind=gce
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure additional projects
+#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer has
+# been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# Configure controller arguments
+#osm_controller_args={'resource-quota-sync-period': ['10s']}
+
+# Configure api server arguments
+#osm_api_server_args={'max-requests-inflight': ['400']}
+
+# default subdomain to use for exposed routes
+#openshift_master_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# Override the default pod eviction timeout
+#openshift_master_pod_eviction_timeout=5m
+
+# Override the default oauth tokenConfig settings:
+# openshift_master_access_token_max_seconds=86400
+# openshift_master_auth_token_max_seconds=500
+
+# Override master servingInfo.maxRequestsInFlight
+#openshift_master_max_requests_inflight=500
+
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
+
+# OpenShift Router Options
+#
+# An OpenShift router will be created during install if there are
+# nodes present with labels matching the default router selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Router selector (optional)
+# Router will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_router_selector='region=infra'
+#
+# Router replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift router selector.
+#openshift_hosted_router_replicas=2
+#
+# Router force subdomain (optional)
+# A router path format to force on all routes used by this router
+# (will ignore the route host value)
+#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
+#
+# Router certificate (optional)
+# Provide local certificate paths which will be configured as the
+# router's default certificate.
+#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
+#
+# Disable management of the OpenShift Router
+#openshift_hosted_manage_router=false
+
+# Openshift Registry Options
+#
+# An OpenShift registry will be created during install if there are
+# nodes present with labels matching the default registry selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Registry selector (optional)
+# Registry will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_registry_selector='region=infra'
+#
+# Registry replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift registry selector.
+#openshift_hosted_registry_replicas=2
+#
+# Disable management of the OpenShift Registry
+#openshift_hosted_manage_registry=false
+
+# Registry Storage Options
+#
+# NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_host=nfs.example.com
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# Openstack
+# Volume must already exist.
+#openshift_hosted_registry_storage_kind=openstack
+#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_registry_storage_openstack_filesystem=ext4
+#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# AWS S3
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+#
+# Any S3 service (Minio, ExoScale, ...): Basically the same as above
+# but with regionendpoint configured
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
+#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+
+# Metrics deployment
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#
+# By default metrics are not automatically deployed, set this to enable them
+# openshift_hosted_metrics_deploy=true
+#
+# Storage Options
+# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
+# Storage options A & B currently support only one cassandra pod which is
+# generally enough for up to 1000 pods. Additional volumes can be created
+# manually after the fact and metrics scaled per the docs.
+#
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_host=nfs.example.com
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_metrics_storage_kind=dynamic
+#
+# Override metricsPublicURL in the master config for cluster metrics
+# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
+# Currently, you may only alter the hostname portion of the url; altering the
+# `/hawkular/metrics` path will break installation of metrics.
+#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+
+# Logging deployment
+#
+# Logging deployment is currently disabled by default; enable it by setting this
+#openshift_hosted_logging_deploy=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_host=nfs.example.com
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_logging_storage_kind=dynamic
+#
+# Option D - none -- Logging will use emptydir volumes which are destroyed when
+# pods are deleted
+#
+# Other Logging Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_hosted_logging/README.md
+#
+# Configure loggingPublicURL in the master config for aggregate logging, defaults
+# to https://kibana.{{ openshift_master_default_subdomain }}
+#openshift_master_logging_public_url=https://kibana.example.com
+# Configure the number of Elasticsearch nodes; unless you're using dynamic provisioning,
+# this value must be 1
+#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_hosted_logging_hostname=logging.apps.example.com
+# Configure the prefix and version for the deployer image
+#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_logging_deployer_version=3.3.0
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# Configure SDN cluster network and Kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. Cannot be changed
+# after deployment.
+#osm_cluster_network_cidr=10.128.0.0/14
+#openshift_portal_net=172.30.0.0/16
+
+
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
+# Configure the number of bits to allocate to each host’s subnet, e.g. 9
+# would mean a /23 network on the host.
+#osm_host_subnet_length=9
+
+# Configure master API and console ports.
+#openshift_master_api_port=8443
+#openshift_master_console_port=8443
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-1.1
+
+# Configure custom ca certificate
+#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
+#
+# NOTE: The CA certificate will not be replaced on existing clusters.
+# This option may only be specified when creating a new cluster or
+# when redeploying cluster certificates with the redeploy-certificates
+# playbook. If replacing the CA certificate in an existing cluster
+# with a custom ca certificate, the following variable must also be
+# set.
+#openshift_certificates_redeploy_ca=true
+
+# Configure custom named certificates (SNI certificates)
+#
+# https://docs.openshift.org/latest/install_config/certificate_customization.html
+#
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+#
+# An optional CA may be specified for each named certificate. CAs will
+# be added to the OpenShift CA bundle which allows for the named
+# certificate to be served for internal cluster communication.
+#
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
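+#
+# For illustration only (not part of the stock example): one way to generate a
+# random 32-character secret locally is, e.g.:
+#   openssl rand -base64 24
+# (24 random bytes encode to 32 base64 characters).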
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+
+# Configure logrotate scripts
+# See: https://github.com/nickhammond/ansible-logrotate
+#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
+
+# openshift-ansible will wait indefinitely for your input when it detects that the
+# value of openshift_hostname resolves to an IP address not bound to any local
+# interfaces. This mis-configuration is problematic for any pod leveraging host
+# networking and liveness or readiness probes.
+# Setting this variable to true will override that check.
+#openshift_override_hostname_check=true
+
+# Configure dnsmasq for cluster dns, switching the host's local resolver to use dnsmasq
+# and configuring the node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2, and False for 1.1 / 3.1 installs; this cannot
+# be used with 1.0 or 3.0.
+#openshift_use_dnsmasq=False
+# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf.
+# This is useful for POC environments where DNS may not actually be available yet.
+#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
+
+# Global Proxy Configuration
+# These options configure the HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts, so those hostnames are automatically added to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to the global proxy
+# config values. You only need to set these if they differ from the global settings
+# above. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}}'
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
+# OpenShift Per-Service Environment Variables
+# Environment variables are added to /etc/sysconfig files for
+# each OpenShift service: node, master (api and controllers).
+# API and controllers environment variables are merged in single
+# master environments.
+#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
+
+# Enable API service auditing, available as of 1.3
+#openshift_master_audit_config={"basicAuditEnabled": true}
+
+# Enable origin repos that point at the CentOS PaaS SIG; defaults to true, only used
+# by deployment_type=origin
+#openshift_enable_origin_repo=false
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
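+# (For reference: the [1:3] range syntax is expanded by Ansible to
+# ose3-master1-, ose3-master2- and ose3-master3-ansible.test.example.com.)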
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported; if using a global
+# containerized=true host variable, it must be set to false here.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
+# However, in order to ensure that your masters are not burdened with running pods, you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
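+# For illustration only (not enabled in this example), such a master entry in the
+# [nodes] group below could look like:
+#   ose3-master[1:3]-ansible.test.example.com openshift_schedulable=False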
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
new file mode 100644
index 000000000..be919c105
--- /dev/null
+++ b/inventory/byo/hosts.ose.example
@@ -0,0 +1,654 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+nfs
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user; this user should allow ssh-based auth without requiring a
+# password. If using ssh key-based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_become=yes
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+# deployment type; valid values are origin, online, atomic-enterprise, and openshift-enterprise
+deployment_type=openshift-enterprise
+
+# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
+# rely on the version running on the first master. Works best for containerized installs where we can usually
+# use this to look up the latest exact version of the container images, which is the tag actually used to configure
+# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
+# release.
+openshift_release=v3.4
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v3.2.0.46
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-3.2.0.46
+
+# Install the openshift examples
+#openshift_install_examples=true
+
+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
+
+# Configure extensionStylesheets in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_oauth_template=/path/to/login-template.html
+
+# Configure imagePolicyConfig in the master config
+# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
+#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+
+# Docker Configuration
+# Add additional, insecure, and blocked registries to the global docker configuration.
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it.
+#openshift_docker_additional_registries=registry.example.com
+#openshift_docker_insecure_registries=registry.example.com
+#openshift_docker_blocked_registries=registry.hacker.com
+# Disable pushing to dockerhub
+#openshift_docker_disable_push_dockerhub=True
+# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=json-file --log-opt max-size=50m"
+#openshift_docker_options="-l warn --ipv6=false"
+
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.10.3"
+
+# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
+# docker_upgrade=False
+
+# Alternate image format string, useful if you've got your own registry mirror
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+# If oreg_url points to a registry other than registry.access.redhat.com we can
+# modify image streams to point at that registry by setting the following to true
+#openshift_examples_modify_imagestreams=true
+
+# Additional yum repos to install
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
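+#
+# For illustration only (not part of the stock example): htpasswd entries can be
+# generated locally with the htpasswd utility from the httpd-tools package, e.g.
+#   htpasswd -nb jdoe changeme
+# prints "jdoe:<hash>"; "jdoe" and "changeme" are placeholder values.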
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+#
+# Configure LDAP CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the LDAPPasswordIdentityProvider.
+#
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# OpenID auth
+#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
+#
+# Configure OpenID CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the OpenIDIdentityProvider.
+#
+#openshift_master_openid_ca=<ca text>
+# or
+#openshift_master_openid_ca_file=<path to local ca file to use>
+
+# Request header auth
+#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
+#
+# Configure request header CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "clientCA"
+# key set within the RequestHeaderIdentityProvider.
+#
+#openshift_master_request_header_ca=<ca text>
+# or
+#openshift_master_request_header_ca_file=<path to local ca file to use>
+
+# Cloud Provider Configuration
+#
+# Note: You may make use of environment variables rather than store
+# sensitive configuration within the ansible inventory.
+# For example:
+#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
+#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
+#
+# AWS
+#openshift_cloudprovider_kind=aws
+# Note: IAM profiles may be used instead of storing API credentials on disk.
+#openshift_cloudprovider_aws_access_key=aws_access_key_id
+#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
+#
+# Openstack
+#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
+#openshift_cloudprovider_openstack_username=username
+#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_domain_id=domain_id
+#openshift_cloudprovider_openstack_domain_name=domain_name
+#openshift_cloudprovider_openstack_tenant_id=tenant_id
+#openshift_cloudprovider_openstack_tenant_name=tenant_name
+#openshift_cloudprovider_openstack_region=region
+#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
+#
+# GCE
+#openshift_cloudprovider_kind=gce
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure additional projects
+#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer has
+# been preconfigured. For installation, the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# Configure controller arguments
+#osm_controller_args={'resource-quota-sync-period': ['10s']}
+
+# Configure api server arguments
+#osm_api_server_args={'max-requests-inflight': ['400']}
+
+# default subdomain to use for exposed routes
+#openshift_master_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# Override the default pod eviction timeout
+#openshift_master_pod_eviction_timeout=5m
+
+# Override the default oauth tokenConfig settings:
+# openshift_master_access_token_max_seconds=86400
+# openshift_master_auth_token_max_seconds=500
+
+# Override master servingInfo.maxRequestsInFlight
+#openshift_master_max_requests_inflight=500
+
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
+# OpenShift Router Options
+#
+# An OpenShift router will be created during install if there are
+# nodes present with labels matching the default router selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Router selector (optional)
+# Router will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_router_selector='region=infra'
+#
+# Router replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift router selector.
+#openshift_hosted_router_replicas=2
+#
+# Router force subdomain (optional)
+# A router path format to force on all routes used by this router
+# (will ignore the route host value)
+#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
+#
+# Router certificate (optional)
+# Provide local certificate paths which will be configured as the
+# router's default certificate.
+#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
+#
+# Disable management of the OpenShift Router
+#openshift_hosted_manage_router=false
+
+# Openshift Registry Options
+#
+# An OpenShift registry will be created during install if there are
+# nodes present with labels matching the default registry selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Registry selector (optional)
+# Registry will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_registry_selector='region=infra'
+#
+# Registry replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift registry selector.
+#openshift_hosted_registry_replicas=2
+#
+# Disable management of the OpenShift Registry
+#openshift_hosted_manage_registry=false
+
+# Registry Storage Options
+#
+# NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_host=nfs.example.com
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# Openstack
+# Volume must already exist.
+#openshift_hosted_registry_storage_kind=openstack
+#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_registry_storage_openstack_filesystem=ext4
+#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# AWS S3
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+#
+# Any S3 service (Minio, ExoScale, ...): Basically the same as above
+# but with regionendpoint configured
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
+#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+
+# Metrics deployment
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#
+# By default metrics are not automatically deployed, set this to enable them
+# openshift_hosted_metrics_deploy=true
+#
+# Storage Options
+# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
+# Storage options A & B currently support only one cassandra pod which is
+# generally enough for up to 1000 pods. Additional volumes can be created
+# manually after the fact and metrics scaled per the docs.
+#
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_host=nfs.example.com
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_metrics_storage_kind=dynamic
+#
+# Override metricsPublicURL in the master config for cluster metrics
+# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
+# Currently, you may only alter the hostname portion of the url; altering the
+# `/hawkular/metrics` path will break installation of metrics.
+#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+
+# Logging deployment
+#
+# Logging deployment is currently disabled by default; enable it by setting this
+#openshift_hosted_logging_deploy=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_host=nfs.example.com
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_logging_storage_kind=dynamic
+#
+# Option D - none -- Logging will use emptydir volumes which are destroyed when
+# pods are deleted
+#
+# Other Logging Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_hosted_logging/README.md
+#
+# Configure loggingPublicURL in the master config for aggregate logging, defaults
+# to https://kibana.{{ openshift_master_default_subdomain }}
+#openshift_master_logging_public_url=https://kibana.example.com
+# Configure the number of Elasticsearch nodes; unless you're using dynamic provisioning,
+# this value must be 1
+#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_hosted_logging_hostname=logging.apps.example.com
+# Configure the prefix and version for the deployer image
+#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_logging_deployer_version=3.3.0
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# Configure SDN cluster network and Kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. Cannot be changed
+# after deployment.
+#osm_cluster_network_cidr=10.128.0.0/14
+#openshift_portal_net=172.30.0.0/16
+
+
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
+# Configure the number of bits to allocate to each host’s subnet, e.g. 9
+# would mean a /23 network on the host.
+#osm_host_subnet_length=9
+
+# Configure master API and console ports.
+#openshift_master_api_port=8443
+#openshift_master_console_port=8443
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-3.1.0.0
+
+# Configure custom ca certificate
+#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
+#
+# NOTE: The CA certificate will not be replaced on existing clusters.
+# This option may only be specified when creating a new cluster or
+# when redeploying cluster certificates with the redeploy-certificates
+# playbook. If replacing the CA certificate in an existing cluster
+# with a custom ca certificate, the following variable must also be
+# set.
+#openshift_certificates_redeploy_ca=true
+
+# Configure custom named certificates (SNI certificates)
+#
+# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
+#
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+#
+# An optional CA may be specified for each named certificate. CAs will
+# be added to the OpenShift CA bundle which allows for the named
+# certificate to be served for internal cluster communication.
+#
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+
+# Configure logrotate scripts
+# See: https://github.com/nickhammond/ansible-logrotate
+#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
+
+# openshift-ansible will wait indefinitely for your input when it detects that the
+# value of openshift_hostname resolves to an IP address not bound to any local
+# interfaces. This mis-configuration is problematic for any pod leveraging host
+# networking and liveness or readiness probes.
+# Setting this variable to true will override that check.
+#openshift_override_hostname_check=true
+
+# Configure dnsmasq for cluster dns, switching the host's local resolver to use dnsmasq
+# and configuring the node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2, and False for 1.1 / 3.1 installs; this cannot
+# be used with 1.0 or 3.0.
+#openshift_use_dnsmasq=False
+# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf.
+# This is useful for POC environments where DNS may not actually be available yet.
+#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
+
+# Global Proxy Configuration
+# These options configure the HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts, so those hostnames are automatically added to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to the global proxy
+# config values. You only need to set these if they differ from the global settings
+# above. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}}'
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
+# OpenShift Per-Service Environment Variables
+# Environment variables are added to /etc/sysconfig files for
+# each OpenShift service: node, master (api and controllers).
+# API and controllers environment variables are merged in single
+# master environments.
+#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
+
+# Enable API service auditing, available as of 3.2
+#openshift_master_audit_config={"basicAuditEnabled": true}
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported; if using a global
+# containerized=true host variable, it must be set to false here.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
+# However, in order to ensure that your masters are not burdened with running pods, you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index 3403f735e..99746cdbf 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -66,12 +66,22 @@ Examples:
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
- $ plugins/inventory/gce.py --host my_instance
+ $ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
'''
+__requires__ = ['pycrypto>=2.6']
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"
@@ -102,9 +112,9 @@ class GceInventory(object):
# Just display data for specific host
if self.args.host:
- print self.json_format_dict(self.node_to_dict(
+ print(self.json_format_dict(self.node_to_dict(
self.get_instance(self.args.host)),
- pretty=self.args.pretty)
+ pretty=self.args.pretty))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
@@ -211,7 +221,7 @@ class GceInventory(object):
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
- 'gce_public_ip': inst.public_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
@@ -220,14 +230,14 @@ class GceInventory(object):
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': inst.public_ips[0]
+ 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
}
def get_instance(self, instance_name):
'''Gets details about a specific instance '''
try:
return self.driver.ex_get_node(instance_name)
- except Exception, e:
+ except Exception as e:
return None
def group_instances(self):
@@ -247,7 +257,10 @@ class GceInventory(object):
tags = node.extra['tags']
for t in tags:
- tag = 'tag_%s' % t
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
if groups.has_key(tag): groups[tag].append(name)
else: groups[tag] = [name]
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
index 34a4396bd..3996e577e 100644
--- a/inventory/gce/hosts/hosts
+++ b/inventory/gce/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/hosts b/inventory/hosts
deleted file mode 100644
index 72b7ae646..000000000
--- a/inventory/hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
-localhost
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
index 34a4396bd..3996e577e 100644
--- a/inventory/libvirt/hosts/hosts
+++ b/inventory/libvirt/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
deleted file mode 100755
index b7ce9e5dc..000000000
--- a/inventory/multi_ec2.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#!/usr/bin/env python2
-'''
- Fetch and combine multiple ec2 account settings into a single
- json hash.
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-from time import time
-import argparse
-import yaml
-import os
-import subprocess
-import json
-import errno
-import fcntl
-import tempfile
-import copy
-
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
-
-class MultiEc2(object):
- '''
- MultiEc2 class:
- Opens a yaml config file and reads aws credentials.
- Stores a json hash of resources in result.
- '''
-
- def __init__(self, args=None):
- # Allow args to be passed when called as a library
- if not args:
- self.args = {}
- else:
- self.args = args
-
- self.cache_path = DEFAULT_CACHE_PATH
- self.config = None
- self.all_ec2_results = {}
- self.result = {}
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
- etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
-
- # Prefer a file in the same directory, fall back to a file in etc
- if os.path.isfile(same_dir_config_file):
- self.config_file = same_dir_config_file
- elif os.path.isfile(etc_dir_config_file):
- self.config_file = etc_dir_config_file
- else:
- self.config_file = None # expect env vars
-
-
- def run(self):
- '''This method checks to see if the local
- cache is valid for the inventory.
-
- if the cache is valid; return cache
- else the credentials are loaded from multi_ec2.yaml or from the env
- and we attempt to get the inventory from the provider specified.
- '''
- # load yaml
- if self.config_file and os.path.isfile(self.config_file):
- self.config = self.load_yaml_config()
- elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
- os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
- # Build a default config
- self.config = {}
- self.config['accounts'] = [
- {
- 'name': 'default',
- 'cache_location': DEFAULT_CACHE_PATH,
- 'provider': 'aws/hosts/ec2.py',
- 'env_vars': {
- 'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
- 'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
- }
- },
- ]
-
- self.config['cache_max_age'] = 0
- else:
- raise RuntimeError("Could not find valid ec2 credentials in the environment.")
-
- if self.config.has_key('cache_location'):
- self.cache_path = self.config['cache_location']
-
- if self.args.get('refresh_cache', None):
- self.get_inventory()
- self.write_to_cache()
- # if its a host query, fetch and do not cache
- elif self.args.get('host', None):
- self.get_inventory()
- elif not self.is_cache_valid():
- # go fetch the inventories and cache them if cache is expired
- self.get_inventory()
- self.write_to_cache()
- else:
- # get data from disk
- self.get_inventory_from_cache()
-
- def load_yaml_config(self, conf_file=None):
- """Load a yaml config file with credentials to query the
- respective cloud for inventory.
- """
- config = None
-
- if not conf_file:
- conf_file = self.config_file
-
- with open(conf_file) as conf:
- config = yaml.safe_load(conf)
-
- return config
-
- def get_provider_tags(self, provider, env=None):
- """Call <provider> and query all of the tags that are usuable
- by ansible. If environment is empty use the default env.
- """
- if not env:
- env = os.environ
-
- # Allow relatively path'd providers in config file
- if os.path.isfile(os.path.join(self.file_path, provider)):
- provider = os.path.join(self.file_path, provider)
-
- # check to see if provider exists
- if not os.path.isfile(provider) or not os.access(provider, os.X_OK):
- raise RuntimeError("Problem with the provider. Please check path " \
- "and that it is executable. (%s)" % provider)
-
- cmds = [provider]
- if self.args.get('host', None):
- cmds.append("--host")
- cmds.append(self.args.get('host', None))
- else:
- cmds.append('--list')
-
- cmds.append('--refresh-cache')
-
- return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
- stdout=subprocess.PIPE, env=env)
-
- @staticmethod
- def generate_config(config_data):
- """Generate the ec2.ini file in as a secure temp file.
- Once generated, pass it to the ec2.py as an environment variable.
- """
- fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
- for section, values in config_data.items():
- os.write(fildes, "[%s]\n" % section)
- for option, value in values.items():
- os.write(fildes, "%s = %s\n" % (option, value))
- os.close(fildes)
- return tmp_file_path
-
- def run_provider(self):
- '''Setup the provider call with proper variables
- and call self.get_provider_tags.
- '''
- try:
- all_results = []
- tmp_file_paths = []
- processes = {}
- for account in self.config['accounts']:
- env = account['env_vars']
- if account.has_key('provider_config'):
- tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
- env['EC2_INI_PATH'] = tmp_file_paths[-1]
- name = account['name']
- provider = account['provider']
- processes[name] = self.get_provider_tags(provider, env)
-
- # for each process collect stdout when its available
- for name, process in processes.items():
- out, err = process.communicate()
- all_results.append({
- "name": name,
- "out": out.strip(),
- "err": err.strip(),
- "code": process.returncode
- })
-
- finally:
- # Clean up the mkstemp file
- for tmp_file in tmp_file_paths:
- os.unlink(tmp_file)
-
- return all_results
-
- def get_inventory(self):
- """Create the subprocess to fetch tags from a provider.
- Host query:
- Query to return a specific host. If > 1 queries have
- results then fail.
-
- List query:
- Query all of the different accounts for their tags. Once completed
- store all of their results into one merged updated hash.
- """
- provider_results = self.run_provider()
-
- # process --host results
- # For any 0 result, return it
- if self.args.get('host', None):
- count = 0
- for results in provider_results:
- if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
- self.result = json.loads(results['out'])
- count += 1
- if count > 1:
- raise RuntimeError("Found > 1 results for --host %s. \
- This is an invalid state." % self.args.get('host', None))
- # process --list results
- else:
- # For any non-zero, raise an error on it
- for result in provider_results:
- if result['code'] != 0:
- err_msg = ['\nProblem fetching account: {name}',
- 'Error Code: {code}',
- 'StdErr: {err}',
- 'Stdout: {out}',
- ]
- raise RuntimeError('\n'.join(err_msg).format(**result))
- else:
- self.all_ec2_results[result['name']] = json.loads(result['out'])
-
- # Check if user wants extra vars in yaml by
- # having hostvars and all_group defined
- for acc_config in self.config['accounts']:
- self.apply_account_config(acc_config)
-
- # Build results by merging all dictionaries
- values = self.all_ec2_results.values()
- values.insert(0, self.result)
- for result in values:
- MultiEc2.merge_destructively(self.result, result)
-
- def apply_account_config(self, acc_config):
- ''' Apply account config settings
- '''
- if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
- return
-
- results = self.all_ec2_results[acc_config['name']]
- # Update each hostvar with the newly desired key: value
- for host_property, value in acc_config['hostvars'].items():
- # Verify the account results look sane
- # by checking for these keys ('_meta' and 'hostvars' exist)
- if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
- for data in results['_meta']['hostvars'].values():
- data[str(host_property)] = str(value)
-
- # Add this group
- if results.has_key(acc_config['all_group']):
- results["%s_%s" % (host_property, value)] = \
- copy.copy(results[acc_config['all_group']])
-
- # store the results back into all_ec2_results
- self.all_ec2_results[acc_config['name']] = results
-
- @staticmethod
- def merge_destructively(input_a, input_b):
- "merges b into input_a"
- for key in input_b:
- if key in input_a:
- if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
- MultiEc2.merge_destructively(input_a[key], input_b[key])
- elif input_a[key] == input_b[key]:
- pass # same leaf value
- # both lists so add each element in b to a if it does ! exist
- elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
- for result in input_b[key]:
- if result not in input_a[key]:
- input_a[key].append(result)
- # a is a list and not b
- elif isinstance(input_a[key], list):
- if input_b[key] not in input_a[key]:
- input_a[key].append(input_b[key])
- elif isinstance(input_b[key], list):
- input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
- else:
- input_a[key] = [input_a[key], input_b[key]]
- else:
- input_a[key] = input_b[key]
- return input_a
-
- def is_cache_valid(self):
- ''' Determines if the cache files have expired, or if it is still valid '''
-
- if os.path.isfile(self.cache_path):
- mod_time = os.path.getmtime(self.cache_path)
- current_time = time()
- if (mod_time + self.config['cache_max_age']) > current_time:
- return True
-
- return False
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file based on a provider')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Fetch cached only instances (default: False)')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store', default=False,
- help='Get all the variables about a specific instance')
- self.args = parser.parse_args().__dict__
-
- def write_to_cache(self):
- ''' Writes data in JSON format to a file '''
-
- # if it does not exist, try and create it.
- if not os.path.isfile(self.cache_path):
- path = os.path.dirname(self.cache_path)
- try:
- os.makedirs(path)
- except OSError as exc:
- if exc.errno != errno.EEXIST or not os.path.isdir(path):
- raise
-
- json_data = MultiEc2.json_format_dict(self.result, True)
- with open(self.cache_path, 'w') as cache:
- try:
- fcntl.flock(cache, fcntl.LOCK_EX)
- cache.write(json_data)
- finally:
- fcntl.flock(cache, fcntl.LOCK_UN)
-
- def get_inventory_from_cache(self):
- ''' Reads the inventory from the cache file and returns it as a JSON
- object '''
-
- if not os.path.isfile(self.cache_path):
- return None
-
- with open(self.cache_path, 'r') as cache:
- self.result = json.loads(cache.read())
-
- return True
-
- @classmethod
- def json_format_dict(cls, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
- def result_str(self):
- '''Return cache string stored in self.result'''
- return self.json_format_dict(self.result, True)
-
-
-if __name__ == "__main__":
- MEC2 = MultiEc2()
- MEC2.parse_cli_args()
- MEC2.run()
- print MEC2.result_str()
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
deleted file mode 100644
index 99f157b11..000000000
--- a/inventory/multi_ec2.yaml.example
+++ /dev/null
@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
- - name: aws1
- provider: aws/hosts/ec2.py
- provider_config:
- ec2:
- regions: all
- regions_exclude: us-gov-west-1,cn-north-1
- destination_variable: public_dns_name
- route53: False
- cache_path: ~/.ansible/tmp
- cache_max_age: 300
- vpc_destination_variable: ip_address
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- all_group: ec2
- hostvars:
- cloud: aws
- account: aws1
-
-- name: aws2
- provider: aws/hosts/ec2.py
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
deleted file mode 100644
index 900a27f3a..000000000
--- a/inventory/openshift-ansible-inventory.spec
+++ /dev/null
@@ -1,82 +0,0 @@
-Summary: OpenShift Ansible Inventories
-Name: openshift-ansible-inventory
-Version: 0.0.8
-Release: 1%{?dist}
-License: ASL 2.0
-URL: https://github.com/openshift/openshift-ansible
-Source0: %{name}-%{version}.tar.gz
-Requires: python2
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description
-Ansible Inventories used with the openshift-ansible scripts and playbooks.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}/etc/ansible
-mkdir -p %{buildroot}/usr/share/ansible/inventory
-mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
-mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
-
-cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
-cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
-
-%files
-%config(noreplace) /etc/ansible/*
-%dir /usr/share/ansible/inventory
-/usr/share/ansible/inventory/multi_ec2.py*
-/usr/share/ansible/inventory/aws/ec2.py*
-/usr/share/ansible/inventory/gce/gce.py*
-
-%changelog
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.8-1
-- Added more verbosity when error happens. Also fixed a bug.
- (kwoodson@redhat.com)
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * rename openshift_registry_url oreg_url * rename option_images to
- _{oreg|ortr}_images (jhonce@redhat.com)
-- Fix the remaining pylint warnings (lhuard@amadeus.com)
-- Fix some of the pylint warnings (lhuard@amadeus.com)
-- [libvirt cluster] Use net-dhcp-leases to find VMs’ IPs (lhuard@amadeus.com)
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
-- Making multi_ec2 into a library (kwoodson@redhat.com)
-
-* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- Added support for grouping and a bug fix. (kwoodson@redhat.com)
-
-* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
- not dictating what the ec2.ini file should look like. (twiest@redhat.com)
-- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- fixed build problems with openshift-ansible-inventory.spec
- (twiest@redhat.com)
-- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
-- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
-- Adding refresh-cache option and cleanup for pylint. Also updated for
- aws/hosts/ being added. (kwoodson@redhat.com)
-
-* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added the ability to have a config file in /etc/openshift_ansible to
- multi_ec2.py. (twiest@redhat.com)
-- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
-- gce inventory/playbook updates for node registration changes
- (jdetiber@redhat.com)
-- Various fixes (jdetiber@redhat.com)
-
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-
diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts
index 9cdc31449..9b63e98f4 100644
--- a/inventory/openstack/hosts/hosts
+++ b/inventory/openstack/hosts/hosts
@@ -1 +1 @@
-localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local
+localhost ansible_become=no ansible_python_interpreter='/usr/bin/env python2' connection=local
diff --git a/inventory/openstack/hosts/nova.ini b/inventory/openstack/hosts/nova.ini
deleted file mode 100644
index 4900c4965..000000000
--- a/inventory/openstack/hosts/nova.ini
+++ /dev/null
@@ -1,45 +0,0 @@
-# Ansible OpenStack external inventory script
-
-[openstack]
-
-#-------------------------------------------------------------------------
-# Required settings
-#-------------------------------------------------------------------------
-
-# API version
-version = 2
-
-# OpenStack nova username
-username =
-
-# OpenStack nova api_key or password
-api_key =
-
-# OpenStack nova auth_url
-auth_url =
-
-# OpenStack nova project_id or tenant name
-project_id =
-
-#-------------------------------------------------------------------------
-# Optional settings
-#-------------------------------------------------------------------------
-
-# Authentication system
-# auth_system = keystone
-
-# Serverarm region name to use
-# region_name =
-
-# Specify a preference for public or private IPs (public is default)
-# prefer_private = False
-
-# What service type (required for newer nova client)
-# service_type = compute
-
-
-# TODO: Some other options
-# insecure =
-# endpoint_type =
-# extensions =
-# service_name =
diff --git a/inventory/openstack/hosts/nova.py b/inventory/openstack/hosts/nova.py
deleted file mode 100755
index d5bd8d1ee..000000000
--- a/inventory/openstack/hosts/nova.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/env python2
-
-# pylint: skip-file
-
-# (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import re
-import os
-import ConfigParser
-from novaclient import client as nova_client
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-###################################################
-# executed with no parameters, return the list of
-# all groups and hosts
-
-NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
- os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
- "/etc/ansible/nova.ini"]
-
-NOVA_DEFAULTS = {
- 'auth_system': None,
- 'region_name': None,
- 'service_type': 'compute',
-}
-
-
-def nova_load_config_file():
- p = ConfigParser.SafeConfigParser(NOVA_DEFAULTS)
-
- for path in NOVA_CONFIG_FILES:
- if os.path.exists(path):
- p.read(path)
- return p
-
- return None
-
-
-def get_fallback(config, value, section="openstack"):
- """
- Get value from config object and return the value
- or false
- """
- try:
- return config.get(section, value)
- except ConfigParser.NoOptionError:
- return False
-
-
-def push(data, key, element):
- """
- Assist in items to a dictionary of lists
- """
- if (not element) or (not key):
- return
-
- if key in data:
- data[key].append(element)
- else:
- data[key] = [element]
-
-
-def to_safe(word):
- '''
- Converts 'bad' characters in a string to underscores so they can
- be used as Ansible groups
- '''
- return re.sub(r"[^A-Za-z0-9\-]", "_", word)
-
-
-def get_ips(server, access_ip=True):
- """
- Returns a list of the server's IPs, or the preferred
- access IP
- """
- private = []
- public = []
- address_list = []
- # Iterate through each servers network(s), get addresses and get type
- addresses = getattr(server, 'addresses', {})
- if len(addresses) > 0:
- for network in addresses.itervalues():
- for address in network:
- if address.get('OS-EXT-IPS:type', False) == 'fixed':
- private.append(address['addr'])
- elif address.get('OS-EXT-IPS:type', False) == 'floating':
- public.append(address['addr'])
-
- if not access_ip:
- address_list.append(server.accessIPv4)
- address_list.extend(private)
- address_list.extend(public)
- return address_list
-
- access_ip = None
- # Append group to list
- if server.accessIPv4:
- access_ip = server.accessIPv4
- if (not access_ip) and public and not (private and prefer_private):
- access_ip = public[0]
- if private and not access_ip:
- access_ip = private[0]
-
- return access_ip
-
-
-def get_metadata(server):
- """Returns dictionary of all host metadata"""
- get_ips(server, False)
- results = {}
- for key in vars(server):
- # Extract value
- value = getattr(server, key)
-
- # Generate sanitized key
- key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
-
- # Att value to instance result (exclude manager class)
- #TODO: maybe use value.__class__ or similar inside of key_name
- if key != 'os_manager':
- results[key] = value
- return results
-
-config = nova_load_config_file()
-if not config:
- sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES))
-
-# Load up connections info based on config and then environment
-# variables
-username = (get_fallback(config, 'username') or
- os.environ.get('OS_USERNAME', None))
-api_key = (get_fallback(config, 'api_key') or
- os.environ.get('OS_PASSWORD', None))
-auth_url = (get_fallback(config, 'auth_url') or
- os.environ.get('OS_AUTH_URL', None))
-project_id = (get_fallback(config, 'project_id') or
- os.environ.get('OS_TENANT_NAME', None))
-region_name = (get_fallback(config, 'region_name') or
- os.environ.get('OS_REGION_NAME', None))
-auth_system = (get_fallback(config, 'auth_system') or
- os.environ.get('OS_AUTH_SYSTEM', None))
-
-# Determine what type of IP is preferred to return
-prefer_private = False
-try:
- prefer_private = config.getboolean('openstack', 'prefer_private')
-except ConfigParser.NoOptionError:
- pass
-
-client = nova_client.Client(
- version=config.get('openstack', 'version'),
- username=username,
- api_key=api_key,
- auth_url=auth_url,
- region_name=region_name,
- project_id=project_id,
- auth_system=auth_system,
- service_type=config.get('openstack', 'service_type'),
-)
-
-# Default or added list option
-if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1:
- groups = {'_meta': {'hostvars': {}}}
- # Cycle on servers
- for server in client.servers.list():
- access_ip = get_ips(server)
-
- # Push to name group of 1
- push(groups, server.name, access_ip)
-
- # Run through each metadata item and add instance to it
- for key, value in server.metadata.iteritems():
- composed_key = to_safe('tag_{0}_{1}'.format(key, value))
- push(groups, composed_key, access_ip)
-
- # Do special handling of group for backwards compat
- # inventory groups
- group = server.metadata['group'] if 'group' in server.metadata else 'undefined'
- push(groups, group, access_ip)
-
- # Add vars to _meta key for performance optimization in
- # Ansible 1.3+
- groups['_meta']['hostvars'][access_ip] = get_metadata(server)
-
- # Return server list
- print(json.dumps(groups, sort_keys=True, indent=2))
- sys.exit(0)
-
-#####################################################
-# executed with a hostname as a parameter, return the
-# variables for that host
-
-elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
- results = {}
- ips = []
- for server in client.servers.list():
- if sys.argv[2] in (get_ips(server) or []):
- results = get_metadata(server)
- print(json.dumps(results, sort_keys=True, indent=2))
- sys.exit(0)
-
-else:
- print "usage: --list ..OR.. --host <hostname>"
- sys.exit(1)
diff --git a/inventory/openstack/hosts/openstack.py b/inventory/openstack/hosts/openstack.py
new file mode 100755
index 000000000..0d92eae11
--- /dev/null
+++ b/inventory/openstack/hosts/openstack.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
+# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
+# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2016, Rackspace Australia
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+# The OpenStack Inventory module uses os-client-config for configuration.
+# https://github.com/stackforge/os-client-config
+# This means it will either:
+# - Respect normal OS_* environment variables like other OpenStack tools
+# - Read values from a clouds.yaml file.
+# If you want to configure via clouds.yaml, you can put the file in:
+# - Current directory
+# - ~/.config/openstack/clouds.yaml
+# - /etc/openstack/clouds.yaml
+# - /etc/ansible/openstack.yml
+# The clouds.yaml file can contain entries for multiple clouds and multiple
+# regions of those clouds. If it does, this inventory module will connect to
+# all of them and present them as one contiguous inventory.
+#
+# See the adjacent openstack.yml file for an example config file
+# There are three ansible inventory specific options that can be set in
+# the inventory section.
+# expand_hostvars controls whether or not the inventory will make extra API
+# calls to fill out additional information about each server
+# use_hostnames changes the behavior from registering every host with its UUID
+# and making a group of its hostname to only doing this if the
+# hostname in question has more than one server
+# fail_on_errors causes the inventory to fail and return no hosts if one cloud
+# has failed (for example, bad credentials or being offline).
+# When set to False, the inventory will return hosts from
+# whichever other clouds it can contact. (Default: True)
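+# As an illustration only (not part of this module), a minimal clouds.yaml
+# could look like the following; the cloud name and credentials are
+# placeholders, and the 'ansible' block carries the inventory options
+# described above:
+#
+#   clouds:
+#     mycloud:
+#       auth:
+#         auth_url: http://openstack.example.com:5000/v2.0
+#         username: demo
+#         password: secret
+#         project_name: demo
+#       region_name: RegionOne
+#   ansible:
+#     use_hostnames: True
+#     expand_hostvars: False
+#     fail_on_errors: False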
+
+import argparse
+import collections
+import os
+import sys
+import time
+from distutils.version import StrictVersion
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+import os_client_config
+import shade
+import shade.inventory
+
+CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
+
+
+def get_groups_from_server(server_vars, namegroup=True):
+ groups = []
+
+ region = server_vars['region']
+ cloud = server_vars['cloud']
+ metadata = server_vars.get('metadata', {})
+
+ # Create a group for the cloud
+ groups.append(cloud)
+
+ # Create a group on region
+ groups.append(region)
+
+ # And one by cloud_region
+ groups.append("%s_%s" % (cloud, region))
+
+ # Check if the 'group' metadata key is set on the server
+ if 'group' in metadata:
+ groups.append(metadata['group'])
+
+ for extra_group in metadata.get('groups', '').split(','):
+ if extra_group:
+ groups.append(extra_group.strip())
+
+ groups.append('instance-%s' % server_vars['id'])
+ if namegroup:
+ groups.append(server_vars['name'])
+
+ for key in ('flavor', 'image'):
+ if 'name' in server_vars[key]:
+ groups.append('%s-%s' % (key, server_vars[key]['name']))
+
+ for key, value in iter(metadata.items()):
+ groups.append('meta-%s_%s' % (key, value))
+
+ az = server_vars.get('az', None)
+ if az:
+ # Make groups for az, region_az and cloud_region_az
+ groups.append(az)
+ groups.append('%s_%s' % (region, az))
+ groups.append('%s_%s_%s' % (cloud, region, az))
+ return groups
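+# Illustrative example (not part of the module): a server named "node-1" in
+# cloud "mycloud", region "RegionOne", availability zone "nova", with
+# metadata {'group': 'masters', 'groups': 'infra,zone-a'} would be placed in
+# groups such as: mycloud, RegionOne, mycloud_RegionOne, masters, infra,
+# zone-a, instance-<uuid>, node-1, flavor-<name>, image-<name>,
+# meta-group_masters, nova, RegionOne_nova and mycloud_RegionOne_nova.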
+
+
+def get_host_groups(inventory, refresh=False):
+ (cache_file, cache_expiration_time) = get_cache_settings()
+ if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
+ groups = to_json(get_host_groups_from_cloud(inventory))
+ open(cache_file, 'w').write(groups)
+ else:
+ groups = open(cache_file, 'r').read()
+ return groups
+
+
+def append_hostvars(hostvars, groups, key, server, namegroup=False):
+ hostvars[key] = dict(
+ ansible_ssh_host=server['interface_ip'],
+ openstack=server)
+ for group in get_groups_from_server(server, namegroup=namegroup):
+ groups[group].append(key)
+
+
+def get_host_groups_from_cloud(inventory):
+ groups = collections.defaultdict(list)
+ firstpass = collections.defaultdict(list)
+ hostvars = {}
+ list_args = {}
+ if hasattr(inventory, 'extra_config'):
+ use_hostnames = inventory.extra_config['use_hostnames']
+ list_args['expand'] = inventory.extra_config['expand_hostvars']
+ if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
+ list_args['fail_on_cloud_config'] = \
+ inventory.extra_config['fail_on_errors']
+ else:
+ use_hostnames = False
+
+ for server in inventory.list_hosts(**list_args):
+
+ if 'interface_ip' not in server:
+ continue
+ firstpass[server['name']].append(server)
+ for name, servers in firstpass.items():
+ if len(servers) == 1 and use_hostnames:
+ append_hostvars(hostvars, groups, name, servers[0])
+ else:
+ server_ids = set()
+ # Trap for duplicate results
+ for server in servers:
+ server_ids.add(server['id'])
+ if len(server_ids) == 1 and use_hostnames:
+ append_hostvars(hostvars, groups, name, servers[0])
+ else:
+ for server in servers:
+ append_hostvars(
+ hostvars, groups, server['id'], server,
+ namegroup=True)
+ groups['_meta'] = {'hostvars': hostvars}
+ return groups
+
+
+def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
+ ''' Determines if cache file has expired, or if it is still valid '''
+ if refresh:
+ return True
+ if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
+ mod_time = os.path.getmtime(cache_file)
+ current_time = time.time()
+ if (mod_time + cache_expiration_time) > current_time:
+ return False
+ return True
+
+
+def get_cache_settings():
+ config = os_client_config.config.OpenStackConfig(
+ config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
+ # For inventory-wide caching
+ cache_expiration_time = config.get_cache_expiration_time()
+ cache_path = config.get_cache_path()
+ if not os.path.exists(cache_path):
+ os.makedirs(cache_path)
+ cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
+ return (cache_file, cache_expiration_time)
+
+
+def to_json(in_dict):
+ return json.dumps(in_dict, sort_keys=True, indent=2)
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
+ parser.add_argument('--private',
+ action='store_true',
+ help='Use private address for ansible host')
+ parser.add_argument('--refresh', action='store_true',
+ help='Refresh cached information')
+ parser.add_argument('--debug', action='store_true', default=False,
+ help='Enable debug output')
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--list', action='store_true',
+ help='List active servers')
+ group.add_argument('--host', help='List details about the specific host')
+
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+ try:
+ config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
+ shade.simple_logging(debug=args.debug)
+ inventory_args = dict(
+ refresh=args.refresh,
+ config_files=config_files,
+ private=args.private,
+ )
+ if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
+ inventory_args.update(dict(
+ config_key='ansible',
+ config_defaults={
+ 'use_hostnames': False,
+ 'expand_hostvars': True,
+ 'fail_on_errors': True,
+ }
+ ))
+
+ inventory = shade.inventory.OpenStackInventory(**inventory_args)
+
+ if args.list:
+ output = get_host_groups(inventory, refresh=args.refresh)
+ elif args.host:
+ output = to_json(inventory.get_host(args.host))
+ print(output)
+ except shade.OpenStackCloudException as e:
+ sys.stderr.write('%s\n' % e.message)
+ sys.exit(1)
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
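
As a usage sketch (the flags come from parse_args above; the host address and cloud setup are placeholders), the new inventory script can be exercised directly once a clouds.yaml is in place:

    ./openstack.py --list            # full inventory, grouped by cloud, region, az and metadata
    ./openstack.py --host 10.0.0.5   # hostvars for a single server
    ./openstack.py --list --refresh  # bypass the on-disk cache
    ./openstack.py --list --private  # prefer private addresses for ansible_ssh_host

Ansible can also consume it as a dynamic inventory, e.g. ansible -i openstack.py all -m ping.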