-rw-r--r--   .tito/packages/openshift-ansible                                  |  2
-rw-r--r--   README_AEP.md                                                     |  4
-rw-r--r--   README_OSE.md                                                     |  2
-rwxr-xr-x   bin/cluster                                                       |  5
-rwxr-xr-x   bin/ohi                                                           | 47
-rw-r--r--   bin/openshift_ansible/awsutil.py                                  | 85
-rwxr-xr-x   bin/opssh                                                         | 49
-rwxr-xr-x   bin/oscp                                                          | 19
-rwxr-xr-x   bin/ossh                                                          | 30
-rwxr-xr-x   bin/ossh_bash_completion                                          | 12
-rw-r--r--   bin/ossh_zsh_completion                                           |  6
-rw-r--r--   bin/zsh_functions/_ossh                                           |  2
-rw-r--r--   filter_plugins/oo_filters.py                                      | 18
-rw-r--r--   openshift-ansible.spec                                            | 15
-rw-r--r--   playbooks/aws/openshift-cluster/vars.yml                          | 29
-rw-r--r--   playbooks/common/openshift-cluster/update_repos_and_packages.yml  |  2
-rw-r--r--   playbooks/common/openshift-master/config.yml                      |  1
-rw-r--r--   playbooks/gce/openshift-cluster/vars.yml                          | 15
-rw-r--r--   playbooks/libvirt/openshift-cluster/vars.yml                      | 31
-rw-r--r--   playbooks/openstack/openshift-cluster/vars.yml                    | 12
-rw-r--r--   roles/etcd/tasks/main.yml                                         | 16
-rw-r--r--   roles/openshift_cli/tasks/main.yml                                | 17
-rw-r--r--   roles/openshift_common/tasks/main.yml                             |  3
-rw-r--r--   roles/openshift_master/tasks/main.yml                             | 16
-rw-r--r--   roles/openshift_master/templates/master.yaml.v1.j2                |  4
-rw-r--r--   roles/openshift_master_ca/tasks/main.yml                          | 10
-rw-r--r--   roles/openshift_node/tasks/main.yml                               | 29
-rw-r--r--   roles/openshift_node/templates/node.yaml.v1.j2                    |  4
-rw-r--r--   roles/oso_host_monitoring/handlers/main.yml                       |  6
-rw-r--r--   roles/oso_host_monitoring/tasks/main.yml                          | 20
-rw-r--r--   roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 | 43
-rw-r--r--   roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 (renamed from roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2) | 48
-rw-r--r--   roles/rhel_subscribe/tasks/enterprise.yml                         | 18
-rw-r--r--   roles/rhel_subscribe/tasks/main.yml                               |  2
-rw-r--r--   utils/src/ooinstall/cli_installer.py                              | 15
35 files changed, 298 insertions(+), 339 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index caa4ded81..271e86d20 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.26-1 ./
+3.0.28-1 ./
diff --git a/README_AEP.md b/README_AEP.md
index 584a7afff..7cdb1c5d5 100644
--- a/README_AEP.md
+++ b/README_AEP.md
@@ -81,10 +81,10 @@ deployment_type=atomic-enterprise
# Pre-release registry URL; note that in the future these images
# may have an atomicenterprise/aep- prefix or so.
-oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
+oreg_url=rcm-img-docker:5001/openshift3/ose-${component}:${version}
# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}]
# host group for masters
[masters]
diff --git a/README_OSE.md b/README_OSE.md
index 66fba33e5..fdb6a75b8 100644
--- a/README_OSE.md
+++ b/README_OSE.md
@@ -82,7 +82,7 @@ deployment_type=enterprise
# Pre-release additional repo
openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
'baseurl':
-'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
+'http://buildvm/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
'enabled': 1, 'gpgcheck': 0}]
# Origin copr repo
diff --git a/bin/cluster b/bin/cluster
index 3081ebd4a..c2765ff92 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -294,11 +294,8 @@ if __name__ == '__main__':
meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
meta_parser.add_argument('-t', '--deployment-type',
- choices=['origin', 'online', 'enterprise'],
+ choices=['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'],
help='Deployment type. (default: origin)')
- meta_parser.add_argument('-T', '--product-type',
- choices=['openshift', 'atomic-enterprise'],
- help='Product type. (default: openshift)')
meta_parser.add_argument('-o', '--option', action='append',
help='options')
diff --git a/bin/ohi b/bin/ohi
index be9c53ec0..d71a4c4b1 100755
--- a/bin/ohi
+++ b/bin/ohi
@@ -48,28 +48,18 @@ class Ohi(object):
self.aws.print_host_types()
return 0
- hosts = None
- if self.args.host_type is not None and \
- self.args.env is not None:
- # Both env and host-type specified
- hosts = self.aws.get_host_list(host_type=self.args.host_type,
- envs=self.args.env,
- version=self.args.openshift_version,
- cached=self.args.cache_only)
-
- if self.args.host_type is None and \
- self.args.env is not None:
- # Only env specified
- hosts = self.aws.get_host_list(envs=self.args.env,
- version=self.args.openshift_version,
- cached=self.args.cache_only)
-
- if self.args.host_type is not None and \
- self.args.env is None:
- # Only host-type specified
- hosts = self.aws.get_host_list(host_type=self.args.host_type,
- version=self.args.openshift_version,
- cached=self.args.cache_only)
+ if self.args.v3:
+ version = '3'
+ elif self.args.all_versions:
+ version = 'all'
+ else:
+ version = '2'
+
+ hosts = self.aws.get_host_list(clusters=self.args.cluster,
+ host_type=self.args.host_type,
+ envs=self.args.env,
+ version=version,
+ cached=self.args.cache_only)
if hosts is None:
# We weren't able to determine what they wanted to do
@@ -104,19 +94,26 @@ class Ohi(object):
parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
parser.add_argument('--list-host-types', default=False, action='store_true', help='List all of the host types')
+ parser.add_argument('--list', default=False, action='store_true', help='List all hosts')
- parser.add_argument('-e', '--env', action="store", help="Which environment to use")
+ parser.add_argument('-c', '--cluster', action="append", help="Which clusterid to use")
+ parser.add_argument('-e', '--env', action="append", help="Which environment to use")
parser.add_argument('-t', '--host-type', action="store", help="Which host type to use")
parser.add_argument('-l', '--user', action='store', default=None, help='username')
- parser.add_argument('-c', '--cache-only', action='store_true', default=False,
+ parser.add_argument('--cache-only', action='store_true', default=False,
help='Retrieve the host inventory by cache only. Default is false.')
- parser.add_argument('-o', '--openshift-version', action='store', default='2',
+ parser.add_argument('--v2', action='store_true', default=True,
help='Specify the openshift version. Default is 2')
+ parser.add_argument('--v3', action='store_true', default=False,
+ help='Specify the openshift version.')
+
+ parser.add_argument('--all-versions', action='store_true', default=False,
+ help='Specify the openshift version. Return all versions')
self.args = parser.parse_args()
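Reviewer note: ohi replaces the single -o/--openshift-version option with --v2/--v3/--all-versions switches that collapse into one version string before the inventory lookup. A minimal standalone sketch of that precedence (illustrative only; the real code reads self.args as populated by the argparse setup shown above):

    import argparse

    parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
    parser.add_argument('--v2', action='store_true', default=True)
    parser.add_argument('--v3', action='store_true', default=False)
    parser.add_argument('--all-versions', action='store_true', default=False)
    args = parser.parse_args(['--v3'])

    # --v3 takes priority over the always-true --v2 default;
    # --all-versions disables version filtering entirely.
    if args.v3:
        version = '3'
    elif args.all_versions:
        version = 'all'
    else:
        version = '2'
    print(version)  # -> 3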
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 76b4f4f51..e03c0ab15 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -59,9 +59,23 @@ class AwsUtil(object):
minv.run()
return minv.result
+ def get_clusters(self):
+ """Searches for cluster tags in the inventory and returns all of the clusters found."""
+ pattern = re.compile(r'^oo_clusterid_(.*)')
+
+ clusters = []
+ inv = self.get_inventory()
+ for key in inv.keys():
+ matched = pattern.match(key)
+ if matched:
+ clusters.append(matched.group(1))
+
+ clusters.sort()
+ return clusters
+
def get_environments(self):
"""Searches for env tags in the inventory and returns all of the envs found."""
- pattern = re.compile(r'^tag_environment_(.*)')
+ pattern = re.compile(r'^oo_environment_(.*)')
envs = []
inv = self.get_inventory()
@@ -75,7 +89,7 @@ class AwsUtil(object):
def get_host_types(self):
"""Searches for host-type tags in the inventory and returns all host-types found."""
- pattern = re.compile(r'^tag_host-type_(.*)')
+ pattern = re.compile(r'^oo_host-type_(.*)')
host_types = []
inv = self.get_inventory()
@@ -154,6 +168,18 @@ class AwsUtil(object):
return host_type
@staticmethod
+ def gen_version_tag(ver):
+ """Generate the version tag
+ """
+ return "oo_version_%s" % ver
+
+ @staticmethod
+ def gen_clusterid_tag(clu):
+ """Generate the clusterid tag
+ """
+ return "tag_clusterid_%s" % clu
+
+ @staticmethod
def gen_env_tag(env):
"""Generate the environment tag
"""
@@ -165,41 +191,44 @@ class AwsUtil(object):
host_type = self.resolve_host_type(host_type)
return "tag_host-type_%s" % host_type
- def get_host_list(self, host_type=None, envs=None, version=None, cached=False):
+ # This function uses all of these params to perform a filters on our host inventory.
+ # pylint: disable=too-many-arguments
+ def get_host_list(self, clusters=None, host_type=None, envs=None, version=None, cached=False):
"""Get the list of hosts from the inventory using host-type and environment
"""
retval = set([])
envs = envs or []
+
inv = self.get_inventory(cached=cached)
- # We prefer to deal with a list of environments
- if issubclass(type(envs), basestring):
- if envs == 'all':
- envs = self.get_environments()
+ retval.update(inv.get('all_hosts', []))
+
+ if clusters:
+ cluster_hosts = set([])
+ if len(clusters) > 1:
+ for cluster in clusters:
+ clu_tag = AwsUtil.gen_clusterid_tag(cluster)
+ cluster_hosts.update(inv.get(clu_tag, []))
else:
- envs = [envs]
+ cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), []))
+
+ retval.intersection_update(cluster_hosts)
+
+ if envs:
+ env_hosts = set([])
+ if len(envs) > 1:
+ for env in envs:
+ env_tag = AwsUtil.gen_env_tag(env)
+ env_hosts.update(inv.get(env_tag, []))
+ else:
+ env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), []))
+
+ retval.intersection_update(env_hosts)
- if host_type and envs:
- # Both host type and environment were specified
- for env in envs:
- retval.update(inv.get('tag_environment_%s' % env, []))
+ if host_type:
retval.intersection_update(inv.get(self.gen_host_type_tag(host_type), []))
- elif envs and not host_type:
- # Just environment was specified
- for env in envs:
- env_tag = AwsUtil.gen_env_tag(env)
- if env_tag in inv.keys():
- retval.update(inv.get(env_tag, []))
-
- elif host_type and not envs:
- # Just host-type was specified
- host_type_tag = self.gen_host_type_tag(host_type)
- if host_type_tag in inv.keys():
- retval.update(inv.get(host_type_tag, []))
-
- # If version is specified then return only hosts in that version
- if version:
- retval.intersection_update(inv.get('oo_version_%s' % version, []))
+ if version != 'all':
+ retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), []))
return retval
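Reviewer note: get_host_list now starts from the full all_hosts group and narrows it with set intersections for each supplied filter (clusters, envs, host-type, version) instead of the previous branch-per-combination logic. A rough standalone illustration; the inventory dict and the env group name are stand-ins for whatever the gen_*_tag helpers actually return:

    # Stand-in inventory; real group names come from gen_clusterid_tag,
    # gen_env_tag and gen_version_tag.
    inv = {
        'all_hosts':          ['master1', 'node1', 'node2', 'infra1'],
        'tag_clusterid_prod': ['master1', 'node1', 'node2'],
        'env_group_int':      ['node1', 'node2', 'infra1'],
        'oo_version_3':       ['node2', 'infra1'],
    }

    hosts = set(inv.get('all_hosts', []))
    hosts.intersection_update(inv.get('tag_clusterid_prod', []))  # -c prod
    hosts.intersection_update(inv.get('env_group_int', []))       # -e int
    hosts.intersection_update(inv.get('oo_version_3', []))        # --v3
    print(sorted(hosts))  # -> ['node2']

Because the result only ever shrinks, passing version='all' simply skips the final intersection.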
diff --git a/bin/opssh b/bin/opssh
index 8ac526049..3747bc993 100755
--- a/bin/opssh
+++ b/bin/opssh
@@ -13,6 +13,8 @@ Options:
-p PAR, --par=PAR max number of parallel threads (OPTIONAL)
--outdir=OUTDIR output directory for stdout files (OPTIONAL)
--errdir=ERRDIR output directory for stderr files (OPTIONAL)
+ -c CLUSTER, --cluster CLUSTER
+ which cluster to use
-e ENV, --env ENV which environment to use
-t HOST_TYPE, --host-type HOST_TYPE
which host type to use
@@ -45,9 +47,9 @@ fi
# See if ohi is installed
if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
+ echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
- exit 10
+ exit 10
fi
PAR=200
@@ -64,12 +66,23 @@ while [ $# -gt 0 ] ; do
shift # get past the value of the option
;;
+ -c)
+ shift # get past the option
+ CLUSTER=$1
+ shift # get past the value of the option
+ ;;
+
-e)
shift # get past the option
ENV=$1
shift # get past the value of the option
;;
+ --v3)
+ OPENSHIFT_VERSION="--v3"
+ shift # get past the value of the option
+ ;;
+
--timeout)
shift # get past the option
TIMEOUT=$1
@@ -106,20 +119,26 @@ while [ $# -gt 0 ] ; do
done
# Get host list from ohi
-if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then
- HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)"
- OHI_ECODE=$?
-elif [ -n "$ENV" ] ; then
- HOSTS="$(ohi -e "$ENV" 2>/dev/null)"
- OHI_ECODE=$?
-elif [ -n "$HOST_TYPE" ] ; then
- HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)"
+CMD=""
+if [ -n "$CLUSTER" ] ; then
+ CMD="$CMD -c $CLUSTER"
+fi
+
+if [ -n "$ENV" ] ; then
+ CMD="$CMD -e $ENV"
+fi
+
+if [ -n "$HOST_TYPE" ] ; then
+ CMD="$CMD -t $HOST_TYPE"
+fi
+
+if [ -n "$OPENSHIFT_VERSION" ] ; then
+ CMD="$CMD $OPENSHIFT_VERSION"
+fi
+
+if [ -n "$CMD" ] ; then
+ HOSTS="$(ohi $CMD 2>/dev/null)"
OHI_ECODE=$?
-else
- echo
- echo "Error: either -e or -t must be specified"
- echo
- exit 10
fi
if [ $OHI_ECODE -ne 0 ] ; then
diff --git a/bin/oscp b/bin/oscp
index c79fc8785..b15133642 100755
--- a/bin/oscp
+++ b/bin/oscp
@@ -138,7 +138,7 @@ class Oscp(object):
# attempt to select the correct environment if specified
if self.env:
- results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results)
+ results = filter(lambda result: result[1]['oo_environment'] == self.env, results)
if results:
return results
@@ -164,10 +164,8 @@ class Oscp(object):
print '{0:<35} {1}'.format(key, server_info[key])
else:
for host_id, server_info in results[:limit]:
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
if limit:
print
@@ -177,10 +175,9 @@ class Oscp(object):
else:
for env, host_ids in self.host_inventory.items():
for host_id, server_info in host_ids.items():
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
+
def scp(self):
'''scp files to or from a specified host
@@ -209,12 +206,12 @@ class Oscp(object):
if len(results) > 1:
print "Multiple results found for %s." % self.host
for result in results:
- print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1])
+ print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1])
return # early exit, too many results
# Assume we have one and only one.
hostname, server_info = results[0]
- dns = server_info['ec2_public_dns_name']
+        dns = server_info['oo_public_ip']
host_str = "%s%s%s" % (self.user, dns, self.path)
diff --git a/bin/ossh b/bin/ossh
index 50fa996c3..6519e4e08 100755
--- a/bin/ossh
+++ b/bin/ossh
@@ -55,15 +55,15 @@ class Ossh(object):
def parse_cli_args(self):
parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
parser.add_argument('-e', '--env', action="store",
- help="Which environment to search for the host ")
+ help="Which environment to search for the host ")
parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
+ action="store_true", help="debug mode")
parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
+ action="store_true", help="Verbose?")
parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
+ action="store_true", help="Force a refresh on the host cache.")
parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
+ action="store_true", help="list out hosts")
parser.add_argument('-c', '--command', action='store',
help='Command to run on remote host')
parser.add_argument('-l', '--login_name', action='store',
@@ -127,7 +127,7 @@ class Ossh(object):
# attempt to select the correct environment if specified
if self.env:
- results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results)
+ results = filter(lambda result: result[1]['oo_environment'] == self.env, results)
if results:
return results
@@ -153,10 +153,8 @@ class Ossh(object):
print '{0:<35} {1}'.format(key, server_info[key])
else:
for host_id, server_info in results[:limit]:
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
if limit:
print
@@ -166,10 +164,8 @@ class Ossh(object):
else:
for env, host_ids in self.host_inventory.items():
for host_id, server_info in host_ids.items():
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
def ssh(self):
'''SSH to a specified host
@@ -195,12 +191,12 @@ class Ossh(object):
if len(results) > 1:
print "Multiple results found for %s." % self.host
for result in results:
- print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1])
+ print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1])
return # early exit, too many results
# Assume we have one and only one.
- hostname, server_info = results[0]
- dns = server_info['ec2_public_dns_name']
+ _, server_info = results[0]
+ dns = server_info['oo_public_ip']
ssh_args.append(dns)
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 440fa0a45..77b770a43 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
__ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+ /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
fi
}
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
__opssh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
fi
}
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index f9454357b..170ca889b 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
_ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
fi
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
index e34ca5bd4..65979c58a 100644
--- a/bin/zsh_functions/_ossh
+++ b/bin/zsh_functions/_ossh
@@ -2,7 +2,7 @@
_ossh_known_hosts(){
if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items()])')
+ print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
fi
}
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 326c36f6c..671c237b9 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -12,6 +12,8 @@ import os
import pdb
import re
import json
+import yaml
+from ansible.utils.unicode import to_unicode
class FilterModule(object):
''' Custom ansible filters '''
@@ -474,6 +476,19 @@ class FilterModule(object):
secret = os.urandom(num_bytes)
return secret.encode('base-64').strip()
+ @staticmethod
+ def to_padded_yaml(data, level=0, indent=2, **kw):
+ ''' returns a yaml snippet padded to match the indent level you specify '''
+ if data in [None, ""]:
+ return ""
+
+ try:
+ transformed = yaml.safe_dump(data, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
+ padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+ return to_unicode("\n{0}".format(padded))
+ except Exception as my_e:
+ raise errors.AnsibleFilterError('Failed to convert: %s', my_e)
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -493,5 +508,6 @@ class FilterModule(object):
"oo_parse_named_certificates": self.oo_parse_named_certificates,
"oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
"oo_pretty_print_cluster": self.oo_pretty_print_cluster,
- "oo_generate_secret": self.oo_generate_secret
+ "oo_generate_secret": self.oo_generate_secret,
+ "to_padded_yaml": self.to_padded_yaml,
}
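Reviewer note: to_padded_yaml is used further down in master.yaml.v1.j2 and node.yaml.v1.j2 to replace the to_json dumps. A rough illustration of its output for a kubeletArguments-style dict at level=1, mirroring the safe_dump-plus-padding logic added above (assumes PyYAML is available; the argument values are made up for the example):

    import yaml

    data = {'max-pods': ['100'], 'image-gc-high-threshold': ['90']}
    level, indent = 1, 2
    dumped = yaml.safe_dump(data, indent=indent, allow_unicode=True,
                            default_flow_style=False)
    padded = "\n".join(" " * level * indent + line for line in dumped.splitlines())
    print("kubeletArguments:" + "\n" + padded)
    # kubeletArguments:
    #   image-gc-high-threshold:
    #   - '90'
    #   max-pods:
    #   - '100'

The leading newline the filter prepends keeps the rendered YAML valid even though the template places the expression on the same line as the key.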
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7c260ff21..70938e8d2 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.26
+Version: 3.0.28
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -259,6 +259,19 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Jan 11 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.28-1
+- added the rhel7-host-monitoring service file (mwoodson@redhat.com)
+- Fixing tab completion for latest metadata changes (kwoodson@redhat.com)
+- Removing some internal hostnames (bleanhar@redhat.com)
+- Fixing tab completion for latest metadata changes (kwoodson@redhat.com)
+- Make bin/cluster able to spawn OSE 3.1 clusters (lhuard@amadeus.com)
+- oso_host_monitoring role: removed the f22 and zagg client, replaced it with
+ oso-rhel7-host-monitoring container (mwoodson@redhat.com)
+
+* Fri Jan 08 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.27-1
+- Update to metadata tooling. (kwoodson@redhat.com)
+- Fix VM drive cleanup during terminate on libvirt (lhuard@amadeus.com)
+
* Fri Jan 08 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.26-1
- Bug 1296388 - fixing typo (bleanhar@redhat.com)
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index c8ee9bad4..ae12286bd 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,5 +1,19 @@
---
debug_level: 2
+
+deployment_rhel7_ent_base:
+ # rhel-7.1, requires cloud access subscription
+ image: ami-10663b78
+ image_name:
+ region: us-east-1
+ ssh_user: ec2-user
+ sudo: yes
+ keypair: libra
+ type: m4.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+
deployment_vars:
origin:
# centos-7, requires marketplace
@@ -25,15 +39,6 @@ deployment_vars:
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
- enterprise:
- # rhel-7.1, requires cloud access subscription
- image: ami-10663b78
- image_name:
- region: us-east-1
- ssh_user: ec2-user
- sudo: yes
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 9a303c62d..88736ee03 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -4,7 +4,7 @@
openshift_deployment_type: "{{ deployment_type }}"
roles:
- role: rhel_subscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 677c274c4..4ecdf2a0c 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -51,6 +51,7 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
- name: Check status of external etcd certificatees
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index bdb39923e..7fb13c7a6 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -3,6 +3,13 @@ do_we_use_openshift_sdn: true
sdn_network_plugin: redhat/openshift-ovs-subnet
debug_level: 2
# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
+
+deployment_rhel7_ent_base:
+ image: rhel-7
+ machine_type: n1-standard-1
+ ssh_user:
+ sudo: yes
+
deployment_vars:
origin:
image: preinstalled-slave-50g-v5
@@ -14,8 +21,6 @@ deployment_vars:
machine_type: n1-standard-1
ssh_user: root
sudo: no
- enterprise:
- image: rhel-7
- machine_type: n1-standard-1
- ssh_user:
- sudo: yes
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 8b170f99e..da628786b 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -5,6 +5,19 @@ libvirt_network: openshift-ansible
libvirt_uri: 'qemu:///system'
debug_level: 2
+# Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication.
+# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work.
+deployment_rhel7_ent_base:
+ image:
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ sha256: "{{ lookup('oo_option', 'image_sha256') |
+ default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
+ ssh_user: openshift
+ sudo: yes
+
deployment_vars:
origin:
image:
@@ -25,18 +38,6 @@ deployment_vars:
sha256:
ssh_user: root
sudo: no
- enterprise:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('https://access.cdn.redhat.com//content/origin/files/sha256/ff/ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3/rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3', True) }}"
- ssh_user: openshift
- sudo: yes
-# origin:
-# fedora:
-# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
-# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index f8d15999e..76cde1706 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -20,6 +20,11 @@ openstack_flavor:
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
+deployment_rhel7_ent_base:
+ image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
+ ssh_user: openshift
+ sudo: yes
+
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
@@ -29,7 +34,6 @@ deployment_vars:
image:
ssh_user: root
sudo: no
- enterprise:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}"
- ssh_user: openshift
- sudo: yes
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index e83cfc33c..1e97b047b 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -11,24 +11,8 @@
action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present"
when: not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull etcd container
command: docker pull {{ openshift.etcd.etcd_image }}
- when: openshift.common.is_containerized | bool and openshift.etcd.etcd_image not in docker_images.stdout
-
-- name: Wait for etcd image
- command: >
- docker images
- register: docker_images
- until: openshift.etcd.etcd_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
- name: Install etcd container service file
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 8d7686ffd..2b53c9b8e 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -3,32 +3,17 @@
role: common
local_facts:
deployment_type: "{{ openshift_deployment_type }}"
+ cli_image: "{{ osm_image | default(None) }}"
- name: Install clients
yum: pkg={{ openshift.common.service_type }}-clients state=installed
when: not openshift.common.is_containerized | bool
-- name: List Docker images
- command: >
- docker images
- register: docker_images
-
- name: Pull CLI Image
command: >
docker pull {{ openshift.common.cli_image }}
- when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
-
-- name: Wait for CLI image
- command: >
- docker images
- register: docker_images
- until: openshift.common.cli_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
-
- name: Create /usr/local/bin/openshift cli wrapper
template:
src: openshift.j2
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 0ee873a2b..3a2ccb59a 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -38,5 +38,6 @@
set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}"
- name: Set hostname
- hostname: name={{ openshift.common.hostname }}
+ command: >
+ hostnamectl set-hostname {{ openshift.common.hostname }}
when: openshift_set_hostname | default(set_hostname_default) | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 397122631..3b46a0df4 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -86,25 +86,9 @@
action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=present"
when: not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull master image
command: >
docker pull {{ openshift.master.master_image }}
- when: openshift.common.is_containerized | bool and openshift.master.master_image not in docker_images.stdout
-
-- name: Wait for master image
- command: >
- docker images
- register: docker_images
- until: openshift.master.master_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
- name: Install Master docker service file
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 647476b7f..dfcaf1953 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -87,8 +87,8 @@ kubernetesMasterConfig:
- v1beta3
- v1
{% endif %}
- apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_json }}
- controllerArguments: {{ openshift.master.controller_args | default(None) | to_json }}
+ apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+ controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: ""
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 5b4c92f2b..6d9be81c0 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -13,16 +13,10 @@
path: "{{ openshift_master_config_dir }}"
state: directory
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
-- name: Pull required docker image
+- name: Pull master docker image
command: >
docker pull {{ openshift.common.cli_image }}
- when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
+ when: openshift.common.is_containerized | bool
- name: Create the master certificates if they do not already exist
command: >
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 33852d7f8..0828d8e2c 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -44,41 +44,14 @@
action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present"
when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull node image
command: >
docker pull {{ openshift.node.node_image }}
- when: openshift.common.is_containerized | bool and openshift.node.node_image not in docker_images.stdout
-
-- name: Wait for node image
- command: >
- docker images
- register: docker_images
- until: openshift.node.node_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
-
+
- name: Pull OpenVSwitch image
command: >
docker pull {{ openshift.node.ovs_image }}
- when: openshift.common.is_containerized | bool and openshift.node.ovs_image not in docker_images.stdout
- and openshift.common.use_openshift_sdn | bool
-
-- name: Wait for OpenVSwitch image
- command: >
- docker images
- register: docker_images
- until: openshift.node.ovs_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
- name: Install Node docker service file
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 23bd81f91..cbe811f83 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -11,9 +11,7 @@ imageConfig:
format: {{ openshift.node.registry_url }}
latest: false
kind: NodeConfig
-{% if openshift.node.kubelet_args is defined and openshift.node.kubelet_args %}
-kubeletArguments: {{ openshift.node.kubelet_args | to_json }}
-{% endif %}
+kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
diff --git a/roles/oso_host_monitoring/handlers/main.yml b/roles/oso_host_monitoring/handlers/main.yml
index 7863ad15b..3a5d8024c 100644
--- a/roles/oso_host_monitoring/handlers/main.yml
+++ b/roles/oso_host_monitoring/handlers/main.yml
@@ -4,9 +4,3 @@
name: "{{ osohm_host_monitoring }}"
state: restarted
enabled: yes
-
-- name: "Restart the {{ osohm_zagg_client }} service"
- service:
- name: "{{ osohm_zagg_client }}"
- state: restarted
- enabled: yes
diff --git a/roles/oso_host_monitoring/tasks/main.yml b/roles/oso_host_monitoring/tasks/main.yml
index 6ddfa3dcb..a0a453416 100644
--- a/roles/oso_host_monitoring/tasks/main.yml
+++ b/roles/oso_host_monitoring/tasks/main.yml
@@ -5,7 +5,6 @@
with_items:
- osohm_zagg_web_url
- osohm_host_monitoring
- - osohm_zagg_client
- osohm_docker_registry_url
- osohm_default_zagg_server_user
- osohm_default_zagg_server_password
@@ -37,29 +36,12 @@
- "Restart the {{ osohm_host_monitoring }} service"
register: systemd_host_monitoring
-- name: "Copy {{ osohm_zagg_client }} systemd file"
- template:
- src: "{{ osohm_zagg_client }}.service.j2"
- dest: "/etc/systemd/system/{{ osohm_zagg_client }}.service"
- owner: root
- group: root
- mode: 0644
- notify:
- - "Restart the {{ osohm_zagg_client }} service"
- register: zagg_systemd
-
- name: reload systemd
command: /usr/bin/systemctl --system daemon-reload
- when: systemd_host_monitoring | changed or zagg_systemd | changed
+ when: systemd_host_monitoring | changed
- name: "Start the {{ osohm_host_monitoring }} service"
service:
name: "{{ osohm_host_monitoring }}"
state: started
enabled: yes
-
-- name: "Start the {{ osohm_zagg_client }} service"
- service:
- name: "{{ osohm_zagg_client }}"
- state: started
- enabled: yes
diff --git a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2
deleted file mode 100644
index d18ad90fe..000000000
--- a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-# This is a systemd file to run this docker container under systemd.
-# To make this work:
-# * pull the image (probably from ops docker registry)
-# * place this file in /etc/systemd/system without the .systemd extension
-# * run the commands:
-# systemctl daemon-reload
-# systemctl enable pcp-docker
-# systemctl start pcp-docker
-#
-#
-[Unit]
-Description=PCP Collector Contatainer
-Requires=docker.service
-After=docker.service
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Environment=HOME=/etc/docker/ops
-#Slice=container-small.slice
-
-# systemd syntax '=-' ignore errors from return codes.
-ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}"
-ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
-ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}"
-
-
-ExecStart=/usr/bin/docker run --rm --name="{{ osohm_host_monitoring }}" \
- --privileged --net=host --pid=host --ipc=host \
- -v /sys:/sys:ro -v /etc/localtime:/etc/localtime:ro \
- -v /var/lib/docker:/var/lib/docker:ro -v /run:/run \
- -v /var/log:/var/log \
- {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
-
-ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
-ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
-ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
-Restart=always
-RestartSec=30
-
-[Install]
-WantedBy=default.target
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
index bcc8a5e03..753cad69f 100644
--- a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2
+++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
@@ -4,12 +4,12 @@
# * place this file in /etc/systemd/system without the .systemd extension
# * run the commands:
# systemctl daemon-reload
-# systemctl enable zagg-client-docker
-# systemctl start zagg-client-docker
+# systemctl enable oso-rhel7-host-monitoring
+# systemctl start oso-rhel7-host-monitoring
#
#
[Unit]
-Description=Zagg Client Contatainer
+Description=Openshift Host Monitoring Container
Requires=docker.service
After=docker.service
@@ -21,40 +21,52 @@ Environment=HOME=/etc/docker/ops
#Slice=container-small.slice
# systemd syntax '=-' ignore errors from return codes.
-ExecStartPre=-/usr/bin/docker kill "{{ osohm_zagg_client }}"
-ExecStartPre=-/usr/bin/docker rm "{{ osohm_zagg_client }}"
-ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_zagg_client }}"
+ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}"
+ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
+ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}"
+# mwoodson note 1-7-16:
+# pcp recommends mounting /run in their Dockerfile
+# /run conflicts with cron which also runs in this container.
+# I am leaving /run out for now. the guys in #pcp said that they mounted /run
+# to shared the pcp socket that is created in /run. We are not using this,
+# as far as I know.
+# This problem goes away with systemd being run in the containers and not using
+# cron but using systemd timers
+# -v /run:/run \
-ExecStart=/usr/bin/docker run --name {{ osohm_zagg_client }} \
+ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }} \
--privileged \
--pid=host \
--net=host \
- -e ZAGG_URL={{ osohm_zagg_web_url }} \
- -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
- -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
+ --ipc=host \
+ -e ZAGG_URL={{ osohm_zagg_web_url }} \
+ -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
+ -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
-e ZAGG_CLIENT_HOSTNAME={{ ec2_tag_Name }} \
- -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
+ -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
-e OSO_CLUSTER_GROUP={{ cluster_group }} \
- -e OSO_CLUSTER_ID={{ oo_clusterid }} \
+ -e OSO_CLUSTER_ID={{ oo_clusterid }} \
-e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }} \
-e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }} \
-v /etc/localtime:/etc/localtime \
- -v /run/pcp:/run/pcp \
+ -v /sys:/sys:ro \
+ -v /sys/fs/selinux \
+ -v /var/lib/docker:/var/lib/docker:ro \
-v /var/run/docker.sock:/var/run/docker.sock \
- -v /var/run/openvswitch:/var/run/openvswitch \
+ -v /var/run/openvswitch:/var/run/openvswitch \
{% if hostvars[inventory_hostname]['ec2_tag_host-type'] == 'master' %}
-v /etc/openshift/master/admin.kubeconfig:/etc/openshift/master/admin.kubeconfig \
-v /etc/openshift/master/master.etcd-client.crt:/etc/openshift/master/master.etcd-client.crt \
-v /etc/openshift/master/master.etcd-client.key:/etc/openshift/master/master.etcd-client.key \
-v /etc/openshift/master/master-config.yaml:/etc/openshift/master/master-config.yaml \
{% endif %}
- {{ osohm_docker_registry_url }}{{ osohm_zagg_client }}
+ {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
-ExecReload=-/usr/bin/docker stop "{{ osohm_zagg_client }}"
-ExecReload=-/usr/bin/docker rm "{{ osohm_zagg_client }}"
-ExecStop=-/usr/bin/docker stop "{{ osohm_zagg_client }}"
+ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
+ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
+ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
Restart=always
RestartSec=30
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index e9e6e4bd4..08540f440 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -2,8 +2,24 @@
- name: Disable all repositories
command: subscription-manager repos --disable="*"
+- set_fact:
+ default_ose_version: '3.0'
+ when: deployment_type == 'enterprise'
+
+- set_fact:
+ default_ose_version: '3.1'
+ when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
+
+- set_fact:
+ ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}"
+
+- fail:
+ msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
+ when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
+ ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1'] )
+
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-3.0-rpms"
+ --enable="rhel-7-server-ose-{{ ose_version }}-rpms"
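Reviewer note: the version selection above defaults to 3.0 for the legacy 'enterprise' deployment type and 3.1 for atomic-enterprise/openshift-enterprise, with an optional oo_option override that is validated before the repos are enabled. A plain-Python restatement of that logic (illustrative only; the real checks are the set_fact/fail tasks):

    VALID_VERSIONS = {
        'enterprise': ['3.0'],
        'atomic-enterprise': ['3.1'],
        'openshift-enterprise': ['3.1'],
    }

    def resolve_ose_version(deployment_type, requested=None):
        default = VALID_VERSIONS[deployment_type][0]
        version = requested or default
        if version not in VALID_VERSIONS[deployment_type]:
            raise ValueError("%s is not a valid version for %s deployment type"
                             % (version, deployment_type))
        return version

    print(resolve_ose_version('openshift-enterprise'))         # -> 3.1
    print(resolve_ose_version('enterprise', requested='3.0'))  # -> 3.0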
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index c160ea4e9..eecfd04a0 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -41,4 +41,4 @@
command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
- include: enterprise.yml
- when: deployment_type == 'enterprise'
+ when: deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ]
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 05adc7153..2b6c9deee 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -130,14 +130,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
host_props['node'] = True
#TODO: Reenable this option once container installs are out of tech preview
- #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
- # type=click.Choice(['rpm', 'container']),
- # default='rpm')
- #if rpm_or_container == 'container':
- # host_props['containerized'] = True
- #else:
- # host_props['containerized'] = False
- host_props['containerized'] = False
+ rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ type=click.Choice(['rpm', 'container']),
+ default='rpm')
+ if rpm_or_container == 'container':
+ host_props['containerized'] = True
+ else:
+ host_props['containerized'] = False
if existing_env:
host_props['new_host'] = True