299 files changed, 11293 insertions, 1946 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index b7dc52080..ead513c3d 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.19-1 ./ +3.0.36-1 ./ @@ -1,7 +1,7 @@ # openshift-ansible RPM Build instructions We use tito to make building and tracking revisions easy. -For more information on tito, please see the [Tito home page](http://rm-rf.ca/tito "Tito home page"). +For more information on tito, please see the [Tito home page](https://github.com/dgoodwin/tito "Tito home page"). ## Build openshift-ansible-bin diff --git a/README_AEP.md b/README_AEP.md index 83e575ebe..7cdb1c5d5 100644 --- a/README_AEP.md +++ b/README_AEP.md @@ -81,10 +81,10 @@ deployment_type=atomic-enterprise # Pre-release registry URL; note that in the future these images # may have an atomicenterprise/aep- prefix or so. -oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version} +oreg_url=rcm-img-docker:5001/openshift3/ose-${component}:${version} # Pre-release additional repo -openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}] +openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}] # host group for masters [masters] @@ -98,6 +98,8 @@ aep3-node[1:2].example.com The hostnames above should resolve both from the hosts themselves and the host where ansible is running (if different). +A more complete example inventory file ([hosts.aep.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.aep.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory. + ## Running the ansible playbooks From the openshift-ansible checkout run: ```sh diff --git a/README_AWS.md b/README_AWS.md index f8ecaec49..c605de43d 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -51,7 +51,7 @@ to setup a private key file to allow ansible to connect to the created hosts. To do so, add the the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS. ``` Host *.compute-1.amazonaws.com - PrivateKey $HOME/.ssh/my_private_key.pem + IdentityFile $HOME/.ssh/my_private_key.pem ``` Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances. diff --git a/README_CONTAINERIZED_INSTALLATION.md b/README_CONTAINERIZED_INSTALLATION.md new file mode 100644 index 000000000..5f51b9507 --- /dev/null +++ b/README_CONTAINERIZED_INSTALLATION.md @@ -0,0 +1,101 @@ +# Overview + +Users may now deploy containerized versions of OpenShift Origin, OpenShift +Enterprise, or Atomic Enterprise Platform on Atomic +Host[https://projectatomic.io] or RHEL, Centos, and Fedora. This includes +OpenvSwitch based SDN. + + +## Installing on Atomic Host + +When installing on Atomic Host you will automatically have containerized +installation methods selected for you based on detection of _/run/ostree-booted_ + +## Installing on RHEL, Centos, or Fedora + +Currently the default installation method for traditional operating systems is +via RPMs. If you wish to deploy using containerized installation you may set the +ansible variable 'containerized=true' on a per host basis. 
This means that you +may easily deploy environments mixing containerized and RPM based installs. At +this point we suggest deploying heterogeneous environments. + +## CLI Wrappers + +When using containerized installations openshift-ansible will deploy a wrapper +script on each master located in _/usr/local/bin/openshift_ and a set of +symbolic links _/usr/local/bin/oc_, _/usr/local/bin/oadm_, and +_/usr/local/bin/kubectl_ to ease administrative tasks. The wrapper script spawns +a new container on each invocation so you may notice it's slightly slower than +native clients. + +The wrapper scripts mount a limited subset of paths, _~/.kube_, _/etc/origin/_, +and _/tmp_. Be mindful of this when passing in files to be processed by `oc` or + `oadm`. You may find it easier to redirect input like this : + + `oc create -f - < my_file.json` + +## Technical Notes + +### Requisite Images + +Based on your deployment_type the installer will make use of the following +images. Because you may make use of a private repository we've moved the +configuration of docker additional, insecure, and blocked registries to the +beginning of the installation process ensuring that these settings are applied +before attempting to pull any of the following images. + + Origin + openshift/origin + openshift/node (node + openshift-sdn + openvswitch rpm for client tools) + openshift/openvswitch (centos7 + openvswitch rpm, runs ovsdb ovsctl processes) + registry.access.redhat.com/rhel7/etcd + OpenShift Enterprise + openshift3/ose + openshift3/node + openshift3/openvswitch + registry.access.redhat.com/rhel7/etcd + Atomic Enterprise Platform + aep3/aep + aep3/node + aep3/openvswitch + registry.access.redhat.com/rhel7/etcd + + * note openshift3/* and aep3/* images come from registry.access.redhat.com and +rely on the --additional-repository flag being set appropriately. + +### Starting and Stopping Containers + +The installer will create relevant systemd units which can be used to start, +stop, and poll services via normal systemctl commands. These unit names match +those of an RPM installation with the exception of the etcd service which will +be named 'etcd_container'. This change is necessary as currently Atomic Host +ships with etcd package installed as part of Atomic Host and we will instead use +a containerized version. The installer will disable the built in etcd service. +etcd is slated to be removed from os-tree in the future. + +### File Paths + +All configuration files are placed in the same locations as RPM based +installations and will survive os-tree upgrades. + +The examples are installed into _/etc/origin/examples_ rather than +_/usr/share/openshift/examples_ because that is read-only on Atomic Host. + + +### Storage Requirements + +Atomic Host installs normally have a very small root filesystem. However the +etcd, master, and node containers will persist data in /var/lib. Please ensure +that you have enough space on the root filesystem. + +### OpenvSwitch SDN Initialization + +OpenShift SDN initialization requires that the docker bridge be reconfigured and +docker is restarted. This complicates the situation when the node is running +within a container. When using the OVS SDN you'll see the node start, +reconfigure docker, restart docker which will restart all containers, and +finally start successfully. + +The node service may fail to start and be restarted a few times because the +master services are also restarted along with docker. 
We currently work around +this by relying on Restart=always in the docker based systemd units. diff --git a/README_OSE.md b/README_OSE.md index 524950d51..fdb6a75b8 100644 --- a/README_OSE.md +++ b/README_OSE.md @@ -82,7 +82,7 @@ deployment_type=enterprise # Pre-release additional repo openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': -'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', +'http://buildvm/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] # Origin copr repo @@ -105,6 +105,8 @@ ose3-node[1:2].example.com The hostnames above should resolve both from the hosts themselves and the host where ansible is running (if different). +A more complete example inventory file ([hosts.ose.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory. + ## Running the ansible playbooks From the openshift-ansible checkout run: ```sh diff --git a/README_libvirt.md b/README_libvirt.md index fd0250781..3e5df2dca 100644 --- a/README_libvirt.md +++ b/README_libvirt.md @@ -115,9 +115,10 @@ Configuration The following options can be passed via the `-o` flag of the `create` command or as environment variables: -* `image_url` (default to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2`): URL of the QCOW2 image to download +* `image_url` (default to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz`): URL of the QCOW2 image to download * `image_name` (default to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on -* `image_sha256` (default to `e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab`): Expected SHA256 checksum of the downloaded image +* `image_compression` (default to `xz`): Source QCOW2 compression (only xz supported at this time) +* `image_sha256` (default to `9461006300d65172f5668d8875f2aad7b54f7ba4e9c5435d65a84a5a2d66e39b`): Expected SHA256 checksum of the downloaded image * `skip_image_download` (default to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible` Creating a cluster diff --git a/README_origin.md b/README_origin.md index 343ecda3d..0387e213f 100644 --- a/README_origin.md +++ b/README_origin.md @@ -15,7 +15,7 @@ * There is currently a known issue with ansible-1.9.0, you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842 * Available in Fedora channels * Available for EL with EPEL and Optional channel -* One or more RHEL 7.1 or CentOS 7.1 VMs +* One or more RHEL 7.1+, CentOS 7.1+, or Fedora 23+ VMs * Either ssh key based auth for the root user or ssh key based auth for a user with sudo access (no password) * A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/ @@ -59,12 +59,12 @@ option to ansible-playbook. 
# This is an example of a bring your own (byo) host inventory # Create an OSEv3 group that contains the masters and nodes groups -[OSv3:children] +[OSEv3:children] masters nodes # Set variables common for all OSEv3 hosts -[OSv3:vars] +[OSEv3:vars] # SSH user, this user should allow ssh based auth without requiring a password ansible_ssh_user=root @@ -95,6 +95,8 @@ osv3-lb.example.com The hostnames above should resolve both from the hosts themselves and the host where ansible is running (if different). +A more complete example inventory file ([hosts.origin.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.origin.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory. + ## Running the ansible playbooks From the openshift-ansible checkout run: ```sh diff --git a/bin/cluster b/bin/cluster index 9b02b4347..c3b101c98 100755 --- a/bin/cluster +++ b/bin/cluster @@ -55,94 +55,108 @@ class Cluster(object): Create an OpenShift cluster for given provider :param args: command line arguments provided by user """ - env = {'cluster_id': args.cluster_id, + cluster = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider) inventory = self.setup_provider(args.provider) - env['num_masters'] = args.masters - env['num_nodes'] = args.nodes - env['num_infra'] = args.infra - env['num_etcd'] = args.etcd + cluster['num_masters'] = args.masters + cluster['num_nodes'] = args.nodes + cluster['num_infra'] = args.infra + cluster['num_etcd'] = args.etcd + cluster['cluster_env'] = args.env - self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) - def addNodes(self, args): + def add_nodes(self, args): """ Add nodes to an existing cluster for given provider :param args: command line arguments provided by user """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{0}/openshift-cluster/addNodes.yml".format(args.provider) + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + } + playbook = "playbooks/{0}/openshift-cluster/add_nodes.yml".format(args.provider) inventory = self.setup_provider(args.provider) - env['num_nodes'] = args.nodes - env['num_infra'] = args.infra + cluster['num_nodes'] = args.nodes + cluster['num_infra'] = args.infra + cluster['cluster_env'] = args.env - self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def terminate(self, args): """ Destroy OpenShift cluster :param args: command line arguments provided by user """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'cluster_env': args.env, + } playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider) inventory = self.setup_provider(args.provider) - self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def list(self, args): """ List VMs in cluster :param args: command line arguments provided by user """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'cluster_env': args.env, + } playbook = 
"playbooks/{0}/openshift-cluster/list.yml".format(args.provider) inventory = self.setup_provider(args.provider) - self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def config(self, args): """ Configure or reconfigure OpenShift across clustered VMs :param args: command line arguments provided by user """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'cluster_env': args.env, + } playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider) inventory = self.setup_provider(args.provider) - self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def update(self, args): """ Update to latest OpenShift across clustered VMs :param args: command line arguments provided by user """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args)} + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'cluster_env': args.env, + } + playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider) inventory = self.setup_provider(args.provider) - self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def service(self, args): """ Make the same service call across all nodes in the cluster :param args: command line arguments provided by user """ - env = {'cluster_id': args.cluster_id, - 'deployment_type': self.get_deployment_type(args), - 'new_cluster_state': args.state} + cluster = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args), + 'new_cluster_state': args.state, + 'cluster_env': args.env, + } playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider) inventory = self.setup_provider(args.provider) - self.action(args, inventory, env, playbook) + self.action(args, inventory, cluster, playbook) def setup_provider(self, provider): """ @@ -152,10 +166,9 @@ class Cluster(object): """ config = ConfigParser.ConfigParser() if 'gce' == provider: - gce_ini_default_path = os.path.join( - 'inventory/gce/hosts/gce.ini') + gce_ini_default_path = os.path.join('inventory/gce/hosts/gce.ini') gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) - if os.path.exists(gce_ini_path): + if os.path.exists(gce_ini_path): config.readfp(open(gce_ini_path)) for key in config.options('gce'): @@ -190,12 +203,12 @@ class Cluster(object): return inventory - def action(self, args, inventory, env, playbook): + def action(self, args, inventory, cluster, playbook): """ Build ansible-playbook command line and execute :param args: command line arguments provided by user :param inventory: derived provider library - :param env: environment variables for kubernetes + :param cluster: cluster variables for kubernetes :param playbook: ansible playbook to execute """ @@ -206,14 +219,14 @@ class Cluster(object): if args.option: for opt in args.option: k, v = opt.split('=', 1) - env['cli_' + k] = v + cluster['cli_' + k] = v - ansible_env = '-e \'{0}\''.format( - ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()]) + ansible_extra_vars = '-e \'{0}\''.format( + ' '.join(['%s=%s' % (key, value) for (key, value) in cluster.items()]) ) command = 'ansible-playbook {0} {1} {2} {3}'.format( - verbose, inventory, ansible_env, playbook + verbose, inventory, ansible_extra_vars, playbook ) if args.profile: @@ -242,7 
+255,7 @@ class ActionFailed(Exception): if __name__ == '__main__': """ - User command to invoke ansible playbooks in a "known" environment + User command to invoke ansible playbooks in a "known" configuration Reads ~/.openshift-ansible for default configuration items [DEFAULT] @@ -251,7 +264,7 @@ if __name__ == '__main__': providers = gce,aws,libvirt,openstack """ - environment = ConfigParser.SafeConfigParser({ + cluster_config = ConfigParser.SafeConfigParser({ 'cluster_ids': 'marketing,sales', 'validate_cluster_ids': 'False', 'providers': 'gce,aws,libvirt,openstack', @@ -259,36 +272,36 @@ if __name__ == '__main__': path = os.path.expanduser("~/.openshift-ansible") if os.path.isfile(path): - environment.read(path) + cluster_config.read(path) cluster = Cluster() parser = argparse.ArgumentParser( - description='Python wrapper to ensure proper environment for OpenShift ansible playbooks', + description='Python wrapper to ensure proper configuration for OpenShift ansible playbooks', ) parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity') parser.add_argument('--version', action='version', version='%(prog)s 0.3') meta_parser = argparse.ArgumentParser(add_help=False) - providers = environment.get('DEFAULT', 'providers').split(',') + providers = cluster_config.get('DEFAULT', 'providers').split(',') meta_parser.add_argument('provider', choices=providers, help='provider') - if environment.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"): - meta_parser.add_argument('cluster_id', choices=environment.get('DEFAULT', 'cluster_ids').split(','), + if cluster_config.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"): + meta_parser.add_argument('cluster_id', choices=cluster_config.get('DEFAULT', 'cluster_ids').split(','), help='prefix for cluster VM names') else: meta_parser.add_argument('cluster_id', help='prefix for cluster VM names') meta_parser.add_argument('-t', '--deployment-type', - choices=['origin', 'online', 'enterprise'], + choices=['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'], help='Deployment type. (default: origin)') - meta_parser.add_argument('-T', '--product-type', - choices=['openshift', 'atomic-enterprise'], - help='Product type. (default: openshift)') meta_parser.add_argument('-o', '--option', action='append', help='options') + meta_parser.add_argument('--env', default='dev', type=str, + help='environment for the cluster. Defaults to \'dev\'.') + meta_parser.add_argument('-p', '--profile', action='store_true', help='Enable playbook profiling') @@ -308,13 +321,13 @@ if __name__ == '__main__': create_parser.set_defaults(func=cluster.create) - create_parser = action_parser.add_parser('addNodes', help='Add nodes to a cluster', + create_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster', parents=[meta_parser]) create_parser.add_argument('-n', '--nodes', default=1, type=int, help='number of nodes to add to the cluster') create_parser.add_argument('-i', '--infra', default=1, type=int, help='number of infra nodes to add to the cluster') - create_parser.set_defaults(func=cluster.addNodes) + create_parser.set_defaults(func=cluster.add_nodes) config_parser = action_parser.add_parser('config', @@ -350,14 +363,14 @@ if __name__ == '__main__': args = parser.parse_args() if 'terminate' == args.action and not args.force: - answer = raw_input("This will destroy the ENTIRE {0} environment. Are you sure? 
[y/N] ".format(args.cluster_id)) + answer = raw_input("This will destroy the ENTIRE {0} cluster. Are you sure? [y/N] ".format(args.cluster_id)) if answer not in ['y', 'Y']: sys.stderr.write('\nACTION [terminate] aborted by user!\n') exit(1) if 'update' == args.action and not args.force: answer = raw_input( - "This is destructive and could corrupt {0} environment. Continue? [y/N] ".format(args.cluster_id)) + "This is destructive and could corrupt {0} cluster. Continue? [y/N] ".format(args.cluster_id)) if answer not in ['y', 'Y']: sys.stderr.write('\nACTION [update] aborted by user!\n') exit(1) @@ -1,14 +1,16 @@ #!/usr/bin/env python +''' +Ohi = Openshift Host Inventory + +This script provides an easy way to look at your host inventory. + +This depends on multi_inventory being setup correctly. +''' # vim: expandtab:tabstop=4:shiftwidth=4 import argparse -import traceback import sys import os -import re -import tempfile -import time -import subprocess import ConfigParser from openshift_ansible import awsutil @@ -20,6 +22,9 @@ CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases' class Ohi(object): + ''' + Class for managing openshift host inventory + ''' def __init__(self): self.host_type_aliases = {} self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__))) @@ -35,26 +40,26 @@ class Ohi(object): self.aws = awsutil.AwsUtil(self.host_type_aliases) def run(self): + ''' + Call into awsutil and retrieve the desired hosts and environments + ''' + if self.args.list_host_types: self.aws.print_host_types() return 0 - hosts = None - if self.args.host_type is not None and \ - self.args.env is not None: - # Both env and host-type specified - hosts = self.aws.get_host_list(host_type=self.args.host_type, \ - envs=self.args.env) + if self.args.v3: + version = '3' + elif self.args.all_versions: + version = 'all' + else: + version = '2' - if self.args.host_type is None and \ - self.args.env is not None: - # Only env specified - hosts = self.aws.get_host_list(envs=self.args.env) - - if self.args.host_type is not None and \ - self.args.env is None: - # Only host-type specified - hosts = self.aws.get_host_list(host_type=self.args.host_type) + hosts = self.aws.get_host_list(clusters=self.args.cluster, + host_type=self.args.host_type, + envs=self.args.env, + version=version, + cached=self.args.cache_only) if hosts is None: # We weren't able to determine what they wanted to do @@ -69,6 +74,9 @@ class Ohi(object): return 0 def parse_config_file(self): + ''' + Parse the config file for ohi + ''' if os.path.isfile(self.config_path): config = ConfigParser.ConfigParser() config.read(self.config_path) @@ -85,23 +93,34 @@ class Ohi(object): parser = argparse.ArgumentParser(description='OpenShift Host Inventory') - parser.add_argument('--list-host-types', default=False, action='store_true', - help='List all of the host types') + parser.add_argument('--list-host-types', default=False, action='store_true', help='List all of the host types') + parser.add_argument('--list', default=False, action='store_true', help='List all hosts') - parser.add_argument('-e', '--env', action="store", - help="Which environment to use") + parser.add_argument('-c', '--cluster', action="append", help="Which clusterid to use") + parser.add_argument('-e', '--env', action="append", help="Which environment to use") - parser.add_argument('-t', '--host-type', action="store", - help="Which host type to use") + parser.add_argument('-t', '--host-type', action="store", help="Which host type to use") - parser.add_argument('-l', '--user', 
action='store', default=None, - help='username') + parser.add_argument('-l', '--user', action='store', default=None, help='username') + parser.add_argument('--cache-only', action='store_true', default=False, + help='Retrieve the host inventory by cache only. Default is false.') - self.args = parser.parse_args() + parser.add_argument('--v2', action='store_true', default=True, + help='Specify the openshift version. Default is 2') + parser.add_argument('--v3', action='store_true', default=False, + help='Specify the openshift version.') -if __name__ == '__main__': + parser.add_argument('--all-versions', action='store_true', default=False, + help='Specify the openshift version. Return all versions') + + self.args = parser.parse_args() + +def main(): + ''' + Ohi will do its work here + ''' if len(sys.argv) == 1: print "\nError: No options given. Use --help to see the available options\n" sys.exit(0) @@ -110,5 +129,9 @@ if __name__ == '__main__': ohi = Ohi() exitcode = ohi.run() sys.exit(exitcode) - except ArgumentError as e: - print "\nError: %s\n" % e.message + except ArgumentError as err: + print "\nError: %s\n" % err.message + +if __name__ == '__main__': + main() + diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py index 45345007c..3639ef733 100644 --- a/bin/openshift_ansible/awsutil.py +++ b/bin/openshift_ansible/awsutil.py @@ -46,19 +46,36 @@ class AwsUtil(object): self.alias_lookup[value] = key @staticmethod - def get_inventory(args=None): + def get_inventory(args=None, cached=False): """Calls the inventory script and returns a dictionary containing the inventory." Keyword arguments: args -- optional arguments to pass to the inventory script """ minv = multi_inventory.MultiInventory(args) - minv.run() + if cached: + minv.get_inventory_from_cache() + else: + minv.run() return minv.result + def get_clusters(self): + """Searches for cluster tags in the inventory and returns all of the clusters found.""" + pattern = re.compile(r'^oo_clusterid_(.*)') + + clusters = [] + inv = self.get_inventory() + for key in inv.keys(): + matched = pattern.match(key) + if matched: + clusters.append(matched.group(1)) + + clusters.sort() + return clusters + def get_environments(self): """Searches for env tags in the inventory and returns all of the envs found.""" - pattern = re.compile(r'^tag_environment_(.*)') + pattern = re.compile(r'^oo_environment_(.*)') envs = [] inv = self.get_inventory() @@ -72,7 +89,7 @@ class AwsUtil(object): def get_host_types(self): """Searches for host-type tags in the inventory and returns all host-types found.""" - pattern = re.compile(r'^tag_host-type_(.*)') + pattern = re.compile(r'^oo_hosttype_(.*)') host_types = [] inv = self.get_inventory() @@ -151,61 +168,68 @@ class AwsUtil(object): return host_type @staticmethod + def gen_version_tag(ver): + """Generate the version tag + """ + return "oo_version_%s" % ver + + @staticmethod + def gen_clusterid_tag(clu): + """Generate the clusterid tag + """ + return "oo_clusterid_%s" % clu + + @staticmethod def gen_env_tag(env): """Generate the environment tag """ - return "tag_environment_%s" % env + return "oo_environment_%s" % env - def gen_host_type_tag(self, host_type): + def gen_host_type_tag(self, host_type, version): """Generate the host type tag """ - host_type = self.resolve_host_type(host_type) - return "tag_host-type_%s" % host_type + if version == '2': + host_type = self.resolve_host_type(host_type) + return "oo_hosttype_%s" % host_type - def gen_env_host_type_tag(self, host_type, env): - 
"""Generate the environment host type tag - """ - host_type = self.resolve_host_type(host_type) - return "tag_env-host-type_%s-%s" % (env, host_type) - - def get_host_list(self, host_type=None, envs=None): + # This function uses all of these params to perform a filters on our host inventory. + # pylint: disable=too-many-arguments + def get_host_list(self, clusters=None, host_type=None, envs=None, version=None, cached=False): """Get the list of hosts from the inventory using host-type and environment """ + retval = set([]) envs = envs or [] - inv = self.get_inventory() - # We prefer to deal with a list of environments - if issubclass(type(envs), basestring): - if envs == 'all': - envs = self.get_environments() + inv = self.get_inventory(cached=cached) + + retval.update(inv.get('all_hosts', [])) + + if clusters: + cluster_hosts = set([]) + if len(clusters) > 1: + for cluster in clusters: + clu_tag = AwsUtil.gen_clusterid_tag(cluster) + cluster_hosts.update(inv.get(clu_tag, [])) + else: + cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), [])) + + retval.intersection_update(cluster_hosts) + + if envs: + env_hosts = set([]) + if len(envs) > 1: + for env in envs: + env_tag = AwsUtil.gen_env_tag(env) + env_hosts.update(inv.get(env_tag, [])) else: - envs = [envs] - - if host_type and envs: - # Both host type and environment were specified - retval = [] - for env in envs: - env_host_type_tag = self.gen_env_host_type_tag(host_type, env) - if env_host_type_tag in inv.keys(): - retval += inv[env_host_type_tag] - return set(retval) - - if envs and not host_type: - # Just environment was specified - retval = [] - for env in envs: - env_tag = AwsUtil.gen_env_tag(env) - if env_tag in inv.keys(): - retval += inv[env_tag] - return set(retval) - - if host_type and not envs: - # Just host-type was specified - retval = [] - host_type_tag = self.gen_host_type_tag(host_type) - if host_type_tag in inv.keys(): - retval = inv[host_type_tag] - return set(retval) - - # We should never reach here! - raise ArgumentError("Invalid combination of parameters") + env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), [])) + + retval.intersection_update(env_hosts) + + if host_type: + retval.intersection_update(inv.get(self.gen_host_type_tag(host_type, version), [])) + + if version != 'all': + retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), [])) + + return retval @@ -13,7 +13,10 @@ Options: -p PAR, --par=PAR max number of parallel threads (OPTIONAL) --outdir=OUTDIR output directory for stdout files (OPTIONAL) --errdir=ERRDIR output directory for stderr files (OPTIONAL) + -c CLUSTER, --cluster CLUSTER + which cluster to use -e ENV, --env ENV which environment to use + --v3 When working with v3 environments. v2 by default -t HOST_TYPE, --host-type HOST_TYPE which host type to use --list-host-types list all of the host types @@ -45,9 +48,9 @@ fi # See if ohi is installed if ! which ohi &>/dev/null ; then - echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path." + echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path." 
- exit 10 + exit 10 fi PAR=200 @@ -64,12 +67,23 @@ while [ $# -gt 0 ] ; do shift # get past the value of the option ;; + -c) + shift # get past the option + CLUSTER=$1 + shift # get past the value of the option + ;; + -e) shift # get past the option ENV=$1 shift # get past the value of the option ;; + --v3) + OPENSHIFT_VERSION="--v3" + shift # get past the value of the option + ;; + --timeout) shift # get past the option TIMEOUT=$1 @@ -106,20 +120,26 @@ while [ $# -gt 0 ] ; do done # Get host list from ohi -if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then - HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)" - OHI_ECODE=$? -elif [ -n "$ENV" ] ; then - HOSTS="$(ohi -e "$ENV" 2>/dev/null)" - OHI_ECODE=$? -elif [ -n "$HOST_TYPE" ] ; then - HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)" +CMD="" +if [ -n "$CLUSTER" ] ; then + CMD="$CMD -c $CLUSTER" +fi + +if [ -n "$ENV" ] ; then + CMD="$CMD -e $ENV" +fi + +if [ -n "$HOST_TYPE" ] ; then + CMD="$CMD -t $HOST_TYPE" +fi + +if [ -n "$OPENSHIFT_VERSION" ] ; then + CMD="$CMD $OPENSHIFT_VERSION" +fi + +if [ -n "$CMD" ] ; then + HOSTS="$(ohi $CMD 2>/dev/null)" OHI_ECODE=$? -else - echo - echo "Error: either -e or -t must be specified" - echo - exit 10 fi if [ $OHI_ECODE -ne 0 ] ; then @@ -138,7 +138,7 @@ class Oscp(object): # attempt to select the correct environment if specified if self.env: - results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results) + results = filter(lambda result: result[1]['oo_environment'] == self.env, results) if results: return results @@ -164,10 +164,8 @@ class Oscp(object): print '{0:<35} {1}'.format(key, server_info[key]) else: for host_id, server_info in results[:limit]: - name = server_info['ec2_tag_Name'] - ec2_id = server_info['ec2_id'] - ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \ + '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info) if limit: print @@ -177,10 +175,9 @@ class Oscp(object): else: for env, host_ids in self.host_inventory.items(): for host_id, server_info in host_ids.items(): - name = server_info['ec2_tag_Name'] - ec2_id = server_info['ec2_id'] - ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \ + '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info) + def scp(self): '''scp files to or from a specified host @@ -209,12 +206,12 @@ class Oscp(object): if len(results) > 1: print "Multiple results found for %s." % self.host for result in results: - print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1]) + print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1]) return # early exit, too many results # Assume we have one and only one. 
hostname, server_info = results[0] - dns = server_info['ec2_public_dns_name'] + dns = server_info['oo_pulic_ip'] host_str = "%s%s%s" % (self.user, dns, self.path) @@ -55,15 +55,15 @@ class Ossh(object): def parse_cli_args(self): parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.') parser.add_argument('-e', '--env', action="store", - help="Which environment to search for the host ") + help="Which environment to search for the host ") parser.add_argument('-d', '--debug', default=False, - action="store_true", help="debug mode") + action="store_true", help="debug mode") parser.add_argument('-v', '--verbose', default=False, - action="store_true", help="Verbose?") + action="store_true", help="Verbose?") parser.add_argument('--refresh-cache', default=False, - action="store_true", help="Force a refresh on the host cache.") + action="store_true", help="Force a refresh on the host cache.") parser.add_argument('--list', default=False, - action="store_true", help="list out hosts") + action="store_true", help="list out hosts") parser.add_argument('-c', '--command', action='store', help='Command to run on remote host') parser.add_argument('-l', '--login_name', action='store', @@ -72,6 +72,8 @@ class Ossh(object): parser.add_argument('-o', '--ssh_opts', action='store', help='options to pass to SSH.\n \ "-oForwardX11=yes,TCPKeepAlive=yes"') + parser.add_argument('-A', default=False, action="store_true", + help='Forward authentication agent') parser.add_argument('host', nargs='?', default='') self.args = parser.parse_args() @@ -127,7 +129,7 @@ class Ossh(object): # attempt to select the correct environment if specified if self.env: - results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results) + results = filter(lambda result: result[1]['oo_environment'] == self.env, results) if results: return results @@ -153,10 +155,8 @@ class Ossh(object): print '{0:<35} {1}'.format(key, server_info[key]) else: for host_id, server_info in results[:limit]: - name = server_info['ec2_tag_Name'] - ec2_id = server_info['ec2_id'] - ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \ + '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info) if limit: print @@ -166,10 +166,8 @@ class Ossh(object): else: for env, host_ids in self.host_inventory.items(): for host_id, server_info in host_ids.items(): - name = server_info['ec2_tag_Name'] - ec2_id = server_info['ec2_id'] - ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \ + '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info) def ssh(self): '''SSH to a specified host @@ -181,6 +179,9 @@ class Ossh(object): if self.user: ssh_args.append('-l%s' % self.user) + if self.args.A: + ssh_args.append('-A') + if self.args.verbose: ssh_args.append('-vvv') @@ -195,12 +196,12 @@ class Ossh(object): if len(results) > 1: print "Multiple results found for %s." % self.host for result in results: - print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1]) + print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1]) return # early exit, too many results # Assume we have one and only one. 
- hostname, server_info = results[0] - dns = server_info['ec2_public_dns_name'] + _, server_info = results[0] + dns = server_info['oo_public_ip'] ssh_args.append(dns) diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion index 997ff0f9c..77b770a43 100755 --- a/bin/ossh_bash_completion +++ b/bin/ossh_bash_completion @@ -1,12 +1,12 @@ __ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])' elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])' elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])' fi } @@ -26,13 +26,13 @@ complete -F _ossh ossh oscp __opssh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))' elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) 
for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))' elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))' fi } diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion index 3c4018636..170ca889b 100644 --- a/bin/ossh_zsh_completion +++ b/bin/ossh_zsh_completion @@ -2,13 +2,13 @@ _ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])') elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])') elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])') fi diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh index d205e1055..65979c58a 100644 --- a/bin/zsh_functions/_ossh +++ b/bin/zsh_functions/_ossh @@ -2,7 +2,7 @@ _ossh_known_hosts(){ if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') + print 
$(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') fi } diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc index 08d95b2b8..267aa850d 100644 --- a/docs/best_practices_guide.adoc +++ b/docs/best_practices_guide.adoc @@ -13,9 +13,12 @@ This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119]. == Pull Requests + + +[[All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged]] [cols="2v,v"] |=== -| **Rule** +| <<All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged, Rule>> | All pull requests MUST pass the build bot *before* they are merged. |=== @@ -30,9 +33,10 @@ The tooling is flexible enough that exceptions can be made so that the tool the === Python Source Files ''' +[[Python-source-files-MUST-contain-the-following-vim-mode-line]] [cols="2v,v"] |=== -| **Rule** +| <<Python-source-files-MUST-contain-the-following-vim-mode-line, Rule>> | Python source files MUST contain the following vim mode line. |=== @@ -48,9 +52,10 @@ If mode lines for other editors are needed, please open a GitHub issue. === Method Signatures ''' +[[When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used]] [cols="2v,v"] |=== -| **Rule** +| <<When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>> | When adding a new paramemter to an existing method, a default value SHOULD be used |=== The purpose of this rule is to make it so that method signatures are backwards compatible. @@ -74,18 +79,20 @@ def add_person(first_name, last_name, age=None): http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as managable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request. ''' +[[PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file]] [cols="2v,v"] |=== -| **Rule** +| <<PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file, Rule>> | PyLint rules MUST NOT be disabled on a whole file. |=== Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-particular-message[disable the PyLint check on the line where PyLint is complaining]. ''' +[[PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions]] [cols="2v,v"] |=== -| **Rule** +| <<PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions, Rule>> | PyLint rules MUST NOT be disabled unless they meet one of the following exceptions |=== @@ -95,9 +102,10 @@ Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-par 1. When PyLint fails, but the code makes more sense the way it is formatted (stylistic exception). For this exception, the description of the PyLint disable MUST state why the code is more clear, AND the person reviewing the PR will decide if they agree or not. The reviewer may reject the PR if they disagree with the reason for the disable. ''' +[[All-PyLint-rule-disables-MUST-be-documented-in-the-code]] [cols="2v,v"] |=== -| **Rule** +| <<All-PyLint-rule-disables-MUST-be-documented-in-the-code, Rule>> | All PyLint rule disables MUST be documented in the code. 
|=== @@ -124,9 +132,10 @@ metadata[line] = results.pop() === Yaml Files (Playbooks, Roles, Vars, etc) ''' +[[Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead, Rule>> | Ansible files SHOULD NOT use JSON (use pure YAML instead). |=== @@ -144,9 +153,10 @@ Every effort should be made to keep our Ansible YAML files in pure YAML. === Modules ''' +[[Custom-Ansible-modules-SHOULD-be-embedded-in-a-role]] [cols="2v,v"] |=== -| **Rule** +| <<Custom-Ansible-modules-SHOULD-be-embedded-in-a-role, Rule>> | Custom Ansible modules SHOULD be embedded in a role. |=== @@ -177,9 +187,10 @@ The purpose of this rule is to make it easy to include custom modules in our pla ''' +[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed]] [cols="2v,v"] |=== -| **Rule** +| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed, Rule>> | Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed |=== @@ -204,9 +215,10 @@ When a module has several parameters that are being passed in, it's hard to see ''' +[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters]] [cols="2v,v"] |=== -| **Rule** +| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters, Rule>> | Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters |=== @@ -228,9 +240,10 @@ Lines that are long quickly become a wall of text that isn't easily parsable. It ---- ''' +[[The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module]] [cols="2v,v"] |=== -| **Rule** +| <<The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module, Rule>> | The Ansible `command` module SHOULD be used instead of the Ansible `shell` module. |=== .Context @@ -251,9 +264,10 @@ The Ansible `shell` module can run most commands that can be run from a bash CLI ---- ''' +[[The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module]] [cols="2v,v"] |=== -| **Rule** +| <<The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module, Rule>> | The Ansible `quote` filter MUST be used with any variable passed into the shell module. |=== .Context @@ -279,9 +293,10 @@ It is recommended not to use the `shell` module. However, if it absolutely must * http://docs.ansible.com/fail_module.html[Ansible Fail Module] ''' +[[Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>> | Ansible playbooks MUST begin with checks for any variables that they require. |=== @@ -299,9 +314,10 @@ If an Ansible playbook requires certain variables to be set, it's best to check ---- ''' +[[Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>> | Ansible roles tasks/main.yml file MUST begin with checks for any variables that they require. 
|=== @@ -318,9 +334,10 @@ If an Ansible role requires certain variables to be set, it's best to check for === Tasks ''' +[[Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks]] [cols="2v,v"] |=== -| **Rule** +| <<Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks, Rule>> | Ansible tasks SHOULD NOT be used in ansible playbooks. Instead, use pre_tasks and post_tasks. |=== An Ansible play is defined as a Yaml dictionary. Because of that, ansible doesn't know if the play's tasks list or roles list was specified first. Therefore Ansible always runs tasks after roles. @@ -370,9 +387,10 @@ Therefore, we SHOULD use pre_tasks and post_tasks to make it more clear when the === Roles ''' +[[All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name]] [cols="2v,v"] |=== -| **Rule** +| <<All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name, Rule>> | All tasks in a role SHOULD be tagged with the role name. |=== @@ -395,9 +413,10 @@ This is very useful when developing and debugging new tasks. It can also signifi ''' +[[The-Ansible-roles-directory-MUST-maintain-a-flat-structure]] [cols="2v,v"] |=== -| **Rule** +| <<The-Ansible-roles-directory-MUST-maintain-a-flat-structure, Rule>> | The Ansible roles directory MUST maintain a flat structure. |=== @@ -410,9 +429,10 @@ This is very useful when developing and debugging new tasks. It can also signifi * Make it compatible with Ansible Galaxy ''' +[[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent]] [cols="2v,v"] |=== -| **Rule** +| [[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule]] | Ansible Roles SHOULD be named like technology_component[_subcomponent]. |=== @@ -430,9 +450,10 @@ Many times the `technology` portion of the pattern will line up with a package n * http://jinja.pocoo.org/docs/dev/templates/#builtin-filters[Jinja2 Builtin Filters] ''' +[[The-default-filter-SHOULD-replace-empty-strings-lists-etc]] [cols="2v,v"] |=== -| **Rule** +| <<The-default-filter-SHOULD-replace-empty-strings-lists-etc, Rule>> | The `default` filter SHOULD replace empty strings, lists, etc. |=== @@ -466,3 +487,52 @@ If you want to use default with variables that evaluate to false you have to set In other words, normally the `default` filter will only replace the value if it's undefined. By setting the second parameter to `true`, it will also replace the value if it defaults to a false value in python, so None, empty list, empty string, etc. This is almost always more desirable than an empty list, string, etc. + +=== Yum and DNF +''' +[[Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum]] +[cols="2v,v"] +|=== +| <<Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum, Rule>> +| Package installation MUST use ansible action module to abstract away dnf/yum. +|=== + +[[Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively]] +[cols="2v,v"] +|=== +| <<Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively, Rule>> +| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively. +|=== + +This is done primarily because if you're registering the result of the +installation and you have two conditional tasks based on whether or not yum or +dnf are in use you'll end up inadvertently overwriting the value. It also +reduces duplication. 
name= and state=present are common between dnf and yum +modules. + +.Bad: +[source,yaml] +---- +--- +# tasks.yml +- name: Install etcd (for etcdctl) + yum: name=etcd state=latest" + when: "ansible_pkg_mgr == yum" + register: install_result + +- name: Install etcd (for etcdctl) + dnf: name=etcd state=latest" + when: "ansible_pkg_mgr == dnf" + register: install_result +---- + + +.Good: +[source,yaml] +---- +--- +# tasks.yml +- name: Install etcd (for etcdctl) + action: "{{ ansible_pkg_mgr }} name=etcd state=latest" + register: install_result + ---- diff --git a/docs/style_guide.adoc b/docs/style_guide.adoc index 09d4839c7..72eaedcf9 100644 --- a/docs/style_guide.adoc +++ b/docs/style_guide.adoc @@ -19,9 +19,10 @@ This style guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119]. * https://www.python.org/dev/peps/pep-0008/#maximum-line-length[Python Pep8 Line Length] ''' +[[All-lines-SHOULD-be-no-longer-than-80-characters]] [cols="2v,v"] |=== -| **Rule** +| <<All-lines-SHOULD-be-no-longer-than-80-characters, Rule>> | All lines SHOULD be no longer than 80 characters. |=== @@ -31,9 +32,10 @@ Code readability is subjective, therefore pull-requests SHOULD still be merged, ''' +[[All-lines-MUST-be-no-longer-than-120-characters]] [cols="2v,v"] |=== -| **Rule** +| <<All-lines-MUST-be-no-longer-than-120-characters, Rule>> | All lines MUST be no longer than 120 characters. |=== @@ -46,9 +48,10 @@ This is a hard limit and is enforced by the build bot. This check MUST NOT be di === Ansible Yaml file extension ''' +[[All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc]] [cols="2v,v"] |=== -| **Rule** +| <<All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc, Rule>> | All Ansible Yaml files MUST have a .yml extension (and NOT .YML, .yaml etc). |=== @@ -59,9 +62,10 @@ Example: `tasks.yml` === Ansible CLI Variables ''' +[[Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli]] [cols="2v,v"] |=== -| **Rule** +| <<Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli, Rule>> | Variables meant to be passed in from the ansible CLI MUST have a prefix of cli_ |=== @@ -76,9 +80,10 @@ ansible-playbook -e cli_foo=bar someplays.yml === Ansible Global Variables ''' +[[Global-variables-MUST-have-a-prefix-of-g]] [cols="2v,v"] |=== -| **Rule** +| <<Global-variables-MUST-have-a-prefix-of-g, Rule>> | Global variables MUST have a prefix of g_ |=== Ansible global variables are defined as any variables outside of ansible roles. Examples include playbook variables, variables passed in on the cli, etc. @@ -94,9 +99,10 @@ g_environment: someval Ansible role variables are defined as variables contained in (or passed into) a role. ''' +[[Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules]] [cols="2v,v"] |=== -| **Rule** +| <<Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules, Rule>> | Role variables MUST have a prefix of atleast 3 characters. See below for specific naming rules. 
|===
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 1a854f637..671c237b9 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -8,11 +8,12 @@ Custom filters for use in openshift-ansible
 from ansible import errors
 from operator import itemgetter
 import OpenSSL.crypto
-import os.path
+import os
 import pdb
 import re
 import json
-
+import yaml
+from ansible.utils.unicode import to_unicode
 class FilterModule(object):
     ''' Custom ansible filters '''
@@ -366,9 +367,6 @@ class FilterModule(object):
             "keyfile": "/etc/origin/master/named_certificates/custom2.key",
             "names": [ "some-hostname.com" ] }]
         '''
-        if not issubclass(type(certificates), list):
-            raise errors.AnsibleFilterError("|failed expects certificates is a list")
-
         if not issubclass(type(named_certs_dir), unicode):
             raise errors.AnsibleFilterError("|failed expects named_certs_dir is unicode")
@@ -433,6 +431,7 @@ class FilterModule(object):
         '''
         for tag in tags:
             # Skip tag_env-host-type to avoid ambiguity with tag_env
+            # Removing env-host-type tag but leaving this here
             if tag[:17] == 'tag_env-host-type':
                 continue
             if tag[:len(key)+4] == 'tag_' + key:
@@ -467,6 +466,29 @@ class FilterModule(object):
                 pass
         return clusters

+    @staticmethod
+    def oo_generate_secret(num_bytes):
+        ''' generate a session secret '''
+
+        if not issubclass(type(num_bytes), int):
+            raise errors.AnsibleFilterError("|failed expects num_bytes is int")
+
+        secret = os.urandom(num_bytes)
+        return secret.encode('base-64').strip()
+
+    @staticmethod
+    def to_padded_yaml(data, level=0, indent=2, **kw):
+        ''' returns a yaml snippet padded to match the indent level you specify '''
+        if data in [None, ""]:
+            return ""
+
+        try:
+            transformed = yaml.safe_dump(data, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
+            padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+            return to_unicode("\n{0}".format(padded))
+        except Exception as my_e:
+            raise errors.AnsibleFilterError('Failed to convert: {0}'.format(my_e))
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
@@ -485,5 +507,7 @@ class FilterModule(object):
             "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
             "oo_parse_named_certificates": self.oo_parse_named_certificates,
             "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
-            "oo_pretty_print_cluster": self.oo_pretty_print_cluster
+            "oo_pretty_print_cluster": self.oo_pretty_print_cluster,
+            "oo_generate_secret": self.oo_generate_secret,
+            "to_padded_yaml": self.to_padded_yaml,
         }
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index f12017967..35a881a85 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -463,7 +463,32 @@ class FilterModule(object):
         IdentityProviderBase.validate_idp_list(idp_list)
         return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)

+    @staticmethod
+    def validate_pcs_cluster(data, masters=None):
+        ''' Validates output from "pcs status", ensuring that each master
+            provided is online.
+        Ex: data = ('...',
+                    'PCSD Status:',
+                    'master1.example.com: Online',
+                    'master2.example.com: Online',
+                    'master3.example.com: Online',
+                    '...')
+            masters = ['master1.example.com',
+                       'master2.example.com',
+                       'master3.example.com']
+        returns True
+        '''
+        if not issubclass(type(data), basestring):
+            raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
+        if not issubclass(type(masters), list):
+            raise errors.AnsibleFilterError("|failed expects masters is a list")
+        valid = True
+        for master in masters:
+            if "{0}: Online".format(master) not in data:
+                valid = False
+        return valid

     def filters(self):
         ''' returns a mapping of filters to methods '''
-        return {"translate_idps": self.translate_idps}
+        return {"translate_idps": self.translate_idps,
+                "validate_pcs_cluster": self.validate_pcs_cluster}
diff --git a/git/parent.py b/git/parent.py
new file mode 100755
index 000000000..154a02350
--- /dev/null
+++ b/git/parent.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+'''
+  Script to determine if this commit has also
+  been merged through the stage branch
+'''
+#
+#  Usage:
+#    parent_check.py <branch> <commit_id>
+#
+#
+import sys
+import subprocess
+
+def run_cli_cmd(cmd, in_stdout=None, in_stderr=None):
+    '''Run a command and return its output'''
+    if not in_stderr:
+        proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)
+    else:
+        # Route output to the caller-supplied handles instead of pipes
+        proc = subprocess.Popen(cmd, bufsize=-1, stdout=in_stdout, stderr=in_stderr, shell=False)
+    stdout, stderr = proc.communicate()
+    if proc.returncode != 0:
+        return {"rc": proc.returncode, "error": stderr}
+    else:
+        return {"rc": proc.returncode, "result": stdout}
+
+def main():
+    '''Check to ensure that the commit that is currently
+       being submitted is also in the stage branch.
+
+       if it is, succeed
+       else, fail
+    '''
+    branch = 'prod'
+
+    if sys.argv[1] != branch:
+        sys.exit(0)
+
+    # check out the stg branch
+    results = run_cli_cmd(['/usr/bin/git', 'checkout', 'stg'])
+
+    # pull the latest changes
+    results = run_cli_cmd(['/usr/bin/git', 'pull'])
+
+    # check out the <prod> branch
+    results = run_cli_cmd(['/usr/bin/git', 'checkout', 'prod'])
+
+    results = run_cli_cmd(['/usr/bin/git', 'pull'])
+    # merge the passed in commit into my current <branch>
+
+    commit_id = sys.argv[2]
+    results = run_cli_cmd(['/usr/bin/git', 'merge', commit_id])
+
+    # get the differences from stg and <branch>
+    results = run_cli_cmd(['/usr/bin/git', 'rev-list', '--left-right', 'stg...prod'])
+
+    # exit here with error code if the result coming back is an error
+    if results['rc'] != 0:
+        print results['error']
+        sys.exit(results['rc'])
+
+    count = 0
+    # Each 'result' is a commit
+    # Walk through each commit and see if it is in stg
+    for commit in results['result'].split('\n'):
+
+        # continue if it is already in stg
+        if not commit or commit.startswith('<'):
+            continue
+
+        # remove the first char '>'
+        commit = commit[1:]
+
+        # check if any remote branches contain $commit
+        results = run_cli_cmd(['/usr/bin/git', 'branch', '-q', '-r', '--contains', commit], in_stderr=None)
+
+        # if this comes back empty, nothing contains it, we can skip it as
+        # we have probably created the merge commit here locally
+        if results['rc'] == 0 and len(results['result']) == 0:
+            continue
+
+        # The results generally contain origin/pr/246/merge and origin/pr/246/head
+        # this is the pull request which would contain the commit in question.
+        #
+        # If the results do not contain origin/stg then stage does not contain
+        # the commit in question. Therefore we need to alert!
+        if 'origin/stg' not in results['result']:
+            print "\nFAILED: (These commits are not in stage.)\n"
+            print "\t%s" % commit
+            count += 1
+
+    # Exit with count of commits in #{branch} but not stg
+    sys.exit(count)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/git/parent.rb b/git/parent.rb
deleted file mode 100755
index 2acb127c4..000000000
--- a/git/parent.rb
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env ruby
-#
-#
-#
-
-if __FILE__ == $0
-  # If we aren't on master we don't need to parent check
-  branch = 'prod'
-  exit(0) if ARGV[0] !~ /#{branch}/
-  commit_id = ARGV[1]
-  %x[/usr/bin/git checkout #{branch}]
-  %x[/usr/bin/git merge #{commit_id}]
-
-  count = 0
-  #lines = %x[/usr/bin/git rev-list --left-right stg...master].split("\n")
-  lines = %x[/usr/bin/git rev-list --left-right remotes/origin/stg...#{branch}].split("\n")
-  lines.each do |commit|
-    # next if they are in stage
-    next if commit =~ /^</
-    # remove the first char '>'
-    commit = commit[1..-1]
-    # check if any remote branches contain $commit
-    results = %x[/usr/bin/git branch -q -r --contains #{commit} 2>/dev/null ]
-    # if this comes back empty, nothing contains it, we can skip it as
-    # we have probably created the merge commit here locally
-    next if results.empty?
-
-    # The results generally contain origin/pr/246/merge and origin/pr/246/head
-    # this is the pull request which would contain the commit in question.
-    #
-    # If the results do not contain origin/stg then stage does not contain
-    # the commit in question.  Therefore we need to alert!
-    unless results =~ /origin\/stg/
-      puts "\nFAILED: (These commits are not in stage.)\n"
-      puts "\t#{commit}"
-      count += 1
-    end
-  end
-
-  # Exit with count of commits in #{branch} but not stg
-  exit(count)
-end
-
-__END__
-
diff --git a/git/yaml_validation.py b/git/yaml_validation.py
new file mode 100755
index 000000000..2b5c8ed49
--- /dev/null
+++ b/git/yaml_validation.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+#
+# python yaml validator for a git commit
+#
+'''
+python yaml validator for a git commit
+'''
+import shutil
+import sys
+import os
+import tempfile
+import subprocess
+import yaml
+
+def get_changes(oldrev, newrev, tempdir):
+    '''Get a list of git changes from oldrev to newrev'''
+    proc = subprocess.Popen(['/usr/bin/git', 'diff', '--name-only', oldrev,
+                             newrev, '--diff-filter=ACM'], stdout=subprocess.PIPE)
+    stdout, _ = proc.communicate()
+    files = stdout.split('\n')
+
+    # No file changes
+    if not files:
+        return []
+
+    cmd = '/usr/bin/git archive %s %s | /bin/tar x -C %s' % (newrev, " ".join(files), tempdir)
+    proc = subprocess.Popen(cmd, shell=True)
+    _, _ = proc.communicate()
+
+    rfiles = []
+    for dirpath, _, fnames in os.walk(tempdir):
+        for fname in fnames:
+            rfiles.append(os.path.join(dirpath, fname))
+
+    return rfiles
+
+def main():
+    '''
+    Perform yaml validation
+    '''
+    results = []
+    try:
+        tmpdir = tempfile.mkdtemp(prefix='jenkins-git-')
+        old, new, _ = sys.argv[1:]
+
+        for file_mod in get_changes(old, new, tmpdir):
+
+            print "+++++++ Received: %s" % file_mod
+
+            # skip symlinks and anything that is not a yaml file
+            if os.path.islink(file_mod) or not file_mod.endswith(('.yml', '.yaml')):
+                continue
+
+            try:
+                # safe_load: commit contents are untrusted input
+                yaml.safe_load(open(file_mod))
+                results.append(True)
+
+            except yaml.scanner.ScannerError as yerr:
+                print yerr
+                results.append(False)
+    finally:
+        shutil.rmtree(tmpdir)
+
+    if not all(results):
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
+
diff --git a/git/yaml_validation.rb b/git/yaml_validation.rb
deleted file mode 100755
index 
f5ded7a78..000000000 --- a/git/yaml_validation.rb +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env ruby -# -# -# -require 'yaml' -require 'tmpdir' - -class YamlValidate - def self.yaml_file?(filename) - return filename.end_with?('.yaml') || filename.end_with?('.yml') - end - - def self.short_yaml_ext?(filename) - return filename.end_with?(".yml") - end - - def self.valid_yaml?(filename) - YAML::load_file(filename) - - return true - end -end - -class GitCommit - attr_accessor :oldrev, :newrev, :refname, :tmp - def initialize(oldrev, newrev, refname) - @oldrev = oldrev - @newrev = newrev - @refname = refname - @tmp = Dir.mktmpdir(@newrev) - end - - def get_file_changes() - files = %x[/usr/bin/git diff --name-only #{@oldrev} #{@newrev} --diff-filter=ACM].split("\n") - - # if files is empty we will get a full checkout. This happens on - # a git rm file. If there are no changes then we need to skip the archive - return [] if files.empty? - - # We only want to take the files that changed. Archive will do that when passed - # the filenames. It will export these to a tmp dir - system("/usr/bin/git archive #{@newrev} #{files.join(" ")} | tar x -C #{@tmp}") - return Dir.glob("#{@tmp}/**/*").delete_if { |file| File.directory?(file) } - end -end - -if __FILE__ == $0 - while data = STDIN.gets - oldrev, newrev, refname = data.split - gc = GitCommit.new(oldrev, newrev, refname) - - results = [] - gc.get_file_changes().each do |file| - begin - puts "++++++ Received: #{file}" - - #raise "Yaml file extensions must be .yaml not .yml" if YamlValidate.short_yaml_ext? file - - # skip readme, other files, etc - next unless YamlValidate.yaml_file?(file) - - results << YamlValidate.valid_yaml?(file) - rescue Exception => ex - puts "\n#{ex.message}\n\n" - results << false - end - end - - #puts "RESULTS\n#{results.inspect}\n" - exit 1 if results.include?(false) - end -end diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini index 1f503b8cf..aa0f9090f 100644 --- a/inventory/aws/hosts/ec2.ini +++ b/inventory/aws/hosts/ec2.ini @@ -45,10 +45,10 @@ vpc_destination_variable = ip_address route53 = False # To exclude RDS instances from the inventory, uncomment and set to False. -#rds = False +rds = False # To exclude ElastiCache instances from the inventory, uncomment and set to False. -#elasticache = False +elasticache = False # Additionally, you can specify the list of zones to exclude looking up in # 'route53_excluded_zones' as a comma-separated list. 
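The `to_padded_yaml` filter added to filter_plugins/oo_filters.py above is easiest to see in isolation. Below is a minimal standalone sketch of the same padding logic, using only PyYAML; the sample dict and `level` value are illustrative, not taken from the playbooks:

[source,python]
----
#!/usr/bin/env python
# Standalone sketch of the padding logic behind the to_padded_yaml filter.
import yaml

def to_padded_yaml(data, level=0, indent=2):
    '''Dump data as YAML, indenting every line by level * indent spaces.'''
    if data in [None, ""]:
        return ""
    transformed = yaml.safe_dump(data, indent=indent, default_flow_style=False)
    padded = "\n".join(" " * level * indent + line
                       for line in transformed.splitlines())
    # Leading newline so the snippet starts on a fresh line in the template.
    return "\n{0}".format(padded)

print(to_padded_yaml({'max-pods': ['40']}, level=2))
# ->
#     max-pods:
#     - '40'
----

In a master or node config template this would be invoked along the lines of `{{ openshift_node_kubelet_args | to_padded_yaml(level=1) }}`, so the dumped mapping nests correctly under its parent key.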
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example index d5b872e06..4e883a5c2 100644 --- a/inventory/byo/hosts.aep.example +++ b/inventory/byo/hosts.aep.example @@ -18,12 +18,46 @@ ansible_ssh_user=root # user must be configured for passwordless sudo #ansible_sudo=true +# Debug level for all Atomic Enterprise components (Defaults to 2) +debug_level=2 + # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise deployment_type=atomic-enterprise +# Install the openshift examples +#openshift_install_examples=true + # Enable cluster metrics #use_cluster_metrics=true +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensionScripts in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] + +# Configure extensionStylesheets in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure metricsPublicURL in the master config for cluster metrics +# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html +#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics + +# Configure loggingPublicURL in the master config for aggregate logging +# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html +#openshift_master_logging_public_url=https://kibana.example.com + # Add additional, insecure, and blocked registries to global docker configuration # For enterprise deployment types we ensure that registry.access.redhat.com is # included if you do not include it @@ -88,6 +122,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Override the default controller lease ttl #osm_controller_lease_ttl=30 +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + # default subdomain to use for exposed routes #osm_default_subdomain=apps.test.example.com @@ -97,6 +137,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # default project node selector #osm_default_node_selector='region=primary' +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + # default storage plugin dependencies to install, by default the ceph and # glusterfs plugin dependencies will be installed, if available. 
#osn_storage_plugin_deps=['ceph','glusterfs']
@@ -160,6 +203,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Configure dnsIP in the node config
 #openshift_dns_ip=172.30.0.1

+# Configure node kubelet arguments
+#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+
 # host group for masters
 [masters]
 aep3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.aep_quickstart b/inventory/byo/hosts.aep_quickstart
new file mode 100644
index 000000000..46ea3a03f
--- /dev/null
+++ b/inventory/byo/hosts.aep_quickstart
@@ -0,0 +1,20 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=atomic-enterprise
+osm_use_cockpit=true
+
+[masters]
+ose3-master.example.com
+
+[nodes]
+ose3-master.example.com openshift_schedulable=True
+
+[etcd]
+
+[lb]
diff --git a/inventory/byo/hosts.openstack b/inventory/byo/hosts.openstack
new file mode 100644
index 000000000..05df75c2f
--- /dev/null
+++ b/inventory/byo/hosts.openstack
@@ -0,0 +1,37 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+ansible_ssh_user=cloud-user
+ansible_sudo=true
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+deployment_type=openshift-enterprise
+
+openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}]
+
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift.common.config_base }}/htpasswd'}]
+
+#openshift_pkg_version=-3.0.0.0
+
+[masters]
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
+
+[etcd]
+jdetiber-etcd.usersys.redhat.com
+
+[lb]
+#ose3-lb-ansible.test.example.com
+
+[nodes]
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
+jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 77a3a04b4..632220fa9 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -6,6 +6,7 @@ masters
 nodes
 etcd
 lb
+nfs

 # Set variables common for all OSEv3 hosts
 [OSEv3:vars]
@@ -18,12 +19,46 @@ ansible_ssh_user=root
 # user must be configured for passwordless sudo
 #ansible_sudo=true

+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
 # deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
 deployment_type=origin

+# Install the openshift examples
+#openshift_install_examples=true
+
 # Enable cluster metrics
 #use_cluster_metrics=true

+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts
in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] + +# Configure extensionStylesheets in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure metricsPublicURL in the master config for cluster metrics +# See: https://docs.openshift.org/latest/install_config/cluster_metrics.html +#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics + +# Configure loggingPublicURL in the master config for aggregate logging +# See: https://docs.openshift.org/latest/install_config/aggregate_logging.html +#openshift_master_logging_public_url=https://kibana.example.com + # Add additional, insecure, and blocked registries to global docker configuration # For enterprise deployment types we ensure that registry.access.redhat.com is # included if you do not include it @@ -92,6 +127,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Override the default controller lease ttl #osm_controller_lease_ttl=30 +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + # default subdomain to use for exposed routes #osm_default_subdomain=apps.test.example.com @@ -101,6 +142,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # default project node selector #osm_default_node_selector='region=primary' +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + # default storage plugin dependencies to install, by default the ceph and # glusterfs plugin dependencies will be installed, if available. 
#osn_storage_plugin_deps=['ceph','glusterfs'] @@ -164,6 +208,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure dnsIP in the node config #openshift_dns_ip=172.30.0.1 +# NFS Options +#openshift_nfs_exports_dir=/var/export +#openshift_nfs_registry_volume=regvol +#openshift_nfs_export_options='*(rw,sync,all_squash)' + +# Configure node kubelet arguments +#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 5a4310298..ab9a34db3 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -18,12 +18,46 @@ ansible_ssh_user=root # user must be configured for passwordless sudo #ansible_sudo=true +# Debug level for all OpenShift components (Defaults to 2) +debug_level=2 + # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise deployment_type=openshift-enterprise +# Install the openshift examples +#openshift_install_examples=true + # Enable cluster metrics #use_cluster_metrics=true +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensionScripts in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] + +# Configure extensionStylesheets in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure metricsPublicURL in the master config for cluster metrics +# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html +#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics + +# Configure loggingPublicURL in the master config for aggregate logging +# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html +#openshift_master_logging_public_url=https://kibana.example.com + # Add additional, insecure, and blocked registries to global docker configuration # For enterprise deployment types we ensure that registry.access.redhat.com is # included if you do not include it @@ -88,6 +122,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Override the default controller lease ttl #osm_controller_lease_ttl=30 +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server 
arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + # default subdomain to use for exposed routes #osm_default_subdomain=apps.test.example.com @@ -97,6 +137,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # default project node selector #osm_default_node_selector='region=primary' +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + # default storage plugin dependencies to install, by default the ceph and # glusterfs plugin dependencies will be installed, if available. #osn_storage_plugin_deps=['ceph','glusterfs'] @@ -160,6 +203,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure dnsIP in the node config #openshift_dns_ip=172.30.0.1 +# Configure node kubelet arguments +#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/inventory/multi_inventory.py b/inventory/multi_inventory.py index 232f2402d..20fc48aa9 100755 --- a/inventory/multi_inventory.py +++ b/inventory/multi_inventory.py @@ -56,15 +56,6 @@ class MultiInventory(object): else: self.config_file = None # expect env vars - - def run(self): - '''This method checks to see if the local - cache is valid for the inventory. - - if the cache is valid; return cache - else the credentials are loaded from multi_inventory.yaml or from the env - and we attempt to get the inventory from the provider specified. - ''' # load yaml if self.config_file and os.path.isfile(self.config_file): self.config = self.load_yaml_config() @@ -91,6 +82,15 @@ class MultiInventory(object): if self.config.has_key('cache_location'): self.cache_path = self.config['cache_location'] + def run(self): + '''This method checks to see if the local + cache is valid for the inventory. + + if the cache is valid; return cache + else the credentials are loaded from multi_inventory.yaml or from the env + and we attempt to get the inventory from the provider specified. + ''' + if self.args.get('refresh_cache', None): self.get_inventory() self.write_to_cache() diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 13f4ac08b..1a1445835 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.19 +Version: 3.0.36 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -13,7 +13,7 @@ URL: https://github.com/openshift/openshift-ansible Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz BuildArch: noarch -Requires: ansible >= 1.9.3 +Requires: ansible >= 1.9.4 Requires: python2 %description @@ -259,6 +259,263 @@ Atomic OpenShift Utilities includes %changelog +* Mon Jan 25 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.36-1 +- Fixing awsutil to support aliases and v3 (kwoodson@redhat.com) +- Fail when master restart playbook finds no active masters rather than any + failed masters. (abutcher@redhat.com) +- Skipping any symlinks for the yaml validation check (kwoodson@redhat.com) +- Added template for config loop. (twiest@redhat.com) +- Test validate_pcs_cluster input is basestring instead of str. 
+ (abutcher@redhat.com) +- Fix error when oo_masters_to_config is empty (jdetiber@redhat.com) +- Update inventory examples for console customization (spinolacastro@gmail.com) +- Expose console config for customization (spinolacastro@gmail.com) +- oso_host_monitoring: added environment as a var to the host monitoring + systemd script (mwoodson@redhat.com) +- Check master certificates during upgrade. (abutcher@redhat.com) +- Use haproxy frontend port for os_firewall. (abutcher@redhat.com) +- Fix native master api sysconfig. (abutcher@redhat.com) +- Enable kubernetes master config of podEvictionTimeout from ansible + (jstuever@redhat.com) +- Fix wrapper pathing for non-root user install. (abutcher@redhat.com) +- Remove camel case for bin/cluster addNodes (jdetiber@redhat.com) +- Update cluster_hosts.yml for cloud providers (jdetiber@redhat.com) +- Removing ruby scripts and replacing with python. (kwoodson@redhat.com) +- Fixed a logic bug and yaml load (kwoodson@redhat.com) +- Fixing yaml validation in python. Inputs behave differently as does glob + (kwoodson@redhat.com) +- oso_monitoring: add the zabbix libs (mwoodson@redhat.com) +- Removing removing scripts and moving to python. (kwoodson@redhat.com) +- add ability to disable ztriggers and disable new container dns check + (jdiaz@redhat.com) +- Remove default disable of SDN for GCE (jdetiber@redhat.com) +- Fix hardcoded api_port in openshift_master_cluster (jdetiber@redhat.com) +- Use local address for loopback kubeconfig (jdetiber@redhat.com) +- consolidate steps and cleanup template dir (jdetiber@redhat.com) +- v3_0_to_v3_1_upgrade: Remove is_atomic check for upgrades + (smunilla@redhat.com) +- v3_0_to_v3_1_upgrade: Copy tasks rather than including from the playbook + (smunilla@redhat.com) +- v3_0_to_v3_1_upgrade: Install storage packages (smunilla@redhat.com) +- Controllers_port and firewall rules (spinolacastro@gmail.com) +- Fix bind address/port when isn't default (spinolacastro@gmail.com) +- Add ability to disable os_firewall (jdetiber@redhat.com) + +* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.35-1 +- added the lib_timedate role (mwoodson@redhat.com) +- added chrony (mwoodson@redhat.com) +- added oso_moniotoring tools role (mwoodson@redhat.com) +- Improve pacemaker 'is-active' check. (abutcher@redhat.com) + +* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.34-1 +- clean up too-many-branches / logic (jdiaz@redhat.com) +- atomic-openshift-installer: add containerized to inventory + (smunilla@redhat.com) +- Add 'unknown' to possible output for the is-active check. + (abutcher@redhat.com) +- Fix cluster_method conditional in master restart playbook. + (abutcher@redhat.com) +- Use IdentityFile instead of PrivateKey (donovan.muller@gmail.com) +- atomic-openshift-installer: Remove containerized install for 3.0 + (smunilla@redhat.com) +- Host group should be OSEv3 not OSv3 (donovan.muller@gmail.com) +- Remove pause after haproxy start (abutcher@redhat.com) +- Ensure nfs-utils installed for non-atomic hosts. (abutcher@redhat.com) + +* Fri Jan 15 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.33-1 +- Configure nodes which are also masters prior to nodes in containerized + install. (abutcher@redhat.com) +- Call attention to openshift_master_rolling_restart_mode variable in restart + prompt. (abutcher@redhat.com) +- Added anchors for rules in style_guide.adoc in order to make it easier to + reference specific rules in PRs. 
(twiest@redhat.com) +- Update ec2.ini (jdetiber@redhat.com) + +* Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.32-1 +- Uninstall remove containerized wrapper and symlinks (abutcher@redhat.com) + +* Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.31-1 +- Check api prior to starting node. (abutcher@redhat.com) +- added anchors (twiest@redhat.com) + +* Wed Jan 13 2016 Joel Diaz <jdiaz@redhat.com> 3.0.30-1 +- Add -A and detail --v3 flags + +* Wed Jan 13 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.29-1 +- 3.1.1 upgrade playbook (bleanhar@redhat.com) +- Updated help menu for v3 flag (kwoodson@redhat.com) +- Add wait in between api and controllers start for native ha. + (abutcher@redhat.com) +- atomic-openshift-installer: Error handling for unicode hostnames + (smunilla@redhat.com) +- Update api verification. (abutcher@redhat.com) +- Add a Verify API Server handler that waits for the API server to become + available (sdodson@redhat.com) +- Add -A parameter to forward ssh agent (jdiaz@redhat.com) +- Validate pacemaker cluster members. (abutcher@redhat.com) +- Removed atomic host check (kwoodson@redhat.com) +- Add is_containerized inputs to nosetests. (abutcher@redhat.com) +- Add wait for API before starting controllers w/ native ha install. + (abutcher@redhat.com) +- Fix for to_padded_yaml filter (jdetiber@redhat.com) +- - sqashed to one commit (llange@redhat.com) +- Switch to using hostnamectl as it works on atomic and rhel7 + (sdodson@redhat.com) +- Update rolling restart playbook for pacemaker support. Replace fail with a + warn and prompt if running ansible from a host that will be rebooted. Re- + organize playbooks. (abutcher@redhat.com) +- Implement simple master rolling restarts. (dgoodwin@redhat.com) +- re-enable containerize installs (sdodson@redhat.com) +- Set portal net in master playbook (jdetiber@redhat.com) +- Set the cli image to match osm_image in openshift_cli role + (sdodson@redhat.com) +- atomic-openshift-installer: Populate new_nodes group (smunilla@redhat.com) +- Always pull docker images (sdodson@redhat.com) + +* Mon Jan 11 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.28-1 +- added the rhe7-host-monitoring service file (mwoodson@redhat.com) +- Fixing tab completion for latest metadata changes (kwoodson@redhat.com) +- Removing some internal hostnames (bleanhar@redhat.com) +- Fixing tab completion for latest metadata changes (kwoodson@redhat.com) +- Make bin/cluster able to spawn OSE 3.1 clusters (lhuard@amadeus.com) +- oso_host_monitoring role: removed the f22 and zagg client, replaced it with + oso-rhel7-host-monitoring container (mwoodson@redhat.com) + +* Fri Jan 08 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.27-1 +- Update to metadata tooling. 
(kwoodson@redhat.com) +- Fix VM drive cleanup during terminate on libvirt (lhuard@amadeus.com) + +* Fri Jan 08 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.26-1 +- Bug 1296388 - fixing typo (bleanhar@redhat.com) + +* Thu Jan 07 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.25-1 +- Bug 1296388 - The playbook still configure ManageIQ when + openshift_use_manageiq is false (bleanhar@redhat.com) +- Add a banner to CLI wrapper instructing users that it's only for + bootstrapping (sdodson@redhat.com) +- Rename env into clusterid and add environment in the OpenStack VMs tags + (lhuard@amadeus.com) +- Fix terminate.yml on OpenStack (lhuard@amadeus.com) +- Install gluster and ceph packages when containerized but not atomic + (sdodson@redhat.com) +- Update openshift_facts config_base for Online deployments (whearn@redhat.com) +- Fix multi-word arguments & cli wrapper stdin plumbing (sdodson@redhat.com) +- Improve 3.1/1.1 upgrade check (jdetiber@redhat.com) + +* Thu Jan 07 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.24-1 +- Setting relative paths in the upgrade playbooks wasn't working + (bleanhar@redhat.com) + +* Wed Jan 06 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.23-1 +- Move extra secret validations into openshift_facts. (abutcher@redhat.com) +- Remove not is_containerized restriction on storage plugin includes. + (abutcher@redhat.com) +- We can't enable manageiq for installations less than OSE 3.1 or Origin 1.1 + (bleanhar@redhat.com) +- Fix RHN subscription by explicitly attaching to the right pool + (lhuard@amadeus.com) +- openshift_facts validation (abutcher@redhat.com) +- Secrets validation. (abutcher@redhat.com) +- Clean up idempotency issues with session secrets. (abutcher@redhat.com) + +* Wed Jan 06 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.22-1 +- playbook for restarting SDN (jdiaz@redhat.com) +- Stop haproxy and remove package during uninstall. (abutcher@redhat.com) +- Group name as per hosts.origin.example (donovan.muller@gmail.com) +- I believe the ami id changed since the initial documentation was created for + AWS deployment (rcook@redhat.com) + +* Tue Jan 05 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.21-1 +- Fix osm_controller_args and osm_api_server_args settings. + (abutcher@redhat.com) +- Fix error in byo cluster_hosts.yml (jdetiber@redhat.com) +- Cleanup and fixes for cluster_id change (jdetiber@redhat.com) +- Fix typo in etcd service status fact. (abutcher@redhat.com) +- Removing environment and env tags. (kwoodson@redhat.com) +- Add node kubelet args to inventory examples. (abutcher@redhat.com) +- Adding ManageIQ service account by default (efreiber@redhat.com) +- Fixes typo assigning docker_service_status_changed which leads to + misinterpretation in handler. (eric.mountain@amadeus.com) +- Fix restart handlers. (abutcher@redhat.com) +- Remove lb from docker hosts. (abutcher@redhat.com) +- Install iptables, iptables-services when not is_aotmic (sdodson@redhat.com) +- Install all xpaas streams when enabled (sdodson@redhat.com) +- add the necessary URLs for logging and metrics + (git001@users.noreply.github.com) +- Link to Tito Home Page is Broken (lloy0076@adam.com.au) +- Conditionalize for 3.1.1/1.1.1 (abutcher@redhat.com) +- Use notify for workaround controllers unit. 
(abutcher@redhat.com) +- change dns triggers to average (jdiaz@redhat.com) +- add item/trigger for dns tests on all currently running containers + (jdiaz@redhat.com) +- Add jboss-fuse/application-templates/fis-image-streams.json + (sdodson@redhat.com) +- atomic-openshift-installer: Fix broken nosetest (smunilla@redhat.com) +- Update from jboss-openshift/application-templates ose-v1.2.0-1 + (sdodson@redhat.com) +- fix logic to tolerate occasional failures (jdiaz@redhat.com) +- Clean up versions.sh (sdodson@redhat.com) +- change ovs mount to /var/run/openvswitch will not require a container restart + if openvswitch service is restarted (jdiaz@redhat.com) +- split zagg.server.processor.errors into separate heartbeat and metrics error + items (needed since the scripts are split now). (twiest@redhat.com) +- quick installer tests (smunilla@redhat.com) +- atomic-openshift-installer: Remove HA hint for 3.0 install + (smunilla@redhat.com) +- Add some guards to wait for images to be pulled before moving on + (sdodson@redhat.com) +- Install httpd-tools when not is_atomic (sdodson@redhat.com) +- Properly set use_flannel fact (sbaubeau@redhat.com) +- Fix containerized variable (sdodson@redhat.com) +- Skip yum/dnf ops when is_containerized (sdodson@redhat.com) +- Move all docker config into openshift_docker to minimize docker restarts + (sdodson@redhat.com) +- Create nfs host group with registry volume attachment. (abutcher@redhat.com) +- Add openshift_cli role (sdodson@redhat.com) +- pull docker images only if not already present (jdetiber@redhat.com) +- fixes (jdetiber@redhat.com) +- Containerization work by @sdodson (sdodson@redhat.com) +- Initial containerization work from @ibotty (tob@butter.sh) +- Add zabbix values to track docker container DNS results (jdiaz@redhat.com) +- Fix registry modification for new deployment types. (dgoodwin@redhat.com) +- Updates to ohi to pull cache if specified. Also require version + (kwoodson@redhat.com) +- Zabbix: added trigger to monitor app create over the last hour + (mwoodson@redhat.com) +- added 'Template Zagg Server' (twiest@redhat.com) +- Fixes typo when setting facts to record whether master/node has been + restarted already, to decide whether notify handler should do so or not. + Currently, this causes random SDN network setup failures as openshift-node + gets restarted while the setup script is running, and the subsequent start + fails to configure the SDN because it thinks it's already done. + (eric.mountain@amadeus.com) +- Change controllers service type to simple. (abutcher@redhat.com) +- Updating env-host-type to host patterns (kwoodson@redhat.com) +- Add note that Fedora 23+ is acceptable deployment target for origin + (admiller@redhat.com) +- Enforce connection: local and become: no on all localhost plays + (jdetiber@redhat.com) +- Use join for the uncompress command. (jsteffan@fedoraproject.org) +- Update for latest CentOS-7-x86_64-GenericCloud. - Use xz compressed image - + Update sha256 for new image - Update docs to reflect new settings + (jsteffan@fedoraproject.org) + +* Thu Dec 10 2015 Thomas Wiest <twiest@redhat.com> 3.0.20-1 +- Revert "Automatic commit of package [openshift-ansible] release [3.0.20-1]." + (twiest@redhat.com) +- Automatic commit of package [openshift-ansible] release [3.0.20-1]. 
+ (twiest@redhat.com) +- Install base package in openshift_common for version facts + (abutcher@redhat.com) +- Make the install of openshift_examples optional (jtslear@gmail.com) +- add support for remote command actions no support for anything but custom + scripts at this time (jdiaz@redhat.com) +- Remove yum / dnf duplication (sdodson@redhat.com) +- Remove hacluster user during uninstall. (abutcher@redhat.com) +- Simplify session secrets overrides. (abutcher@redhat.com) +- Squash pcs install into one task. (abutcher@redhat.com) +- Bump ansible requirement to 1.9.4 (sdodson@redhat.com) + * Wed Dec 09 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.19-1 - Fix version dependent image streams (sdodson@redhat.com) - atomic-openshift-installer: Error handling on yaml loading diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml index de9f36c8a..0df77e309 100644 --- a/playbooks/adhoc/bootstrap-fedora.yml +++ b/playbooks/adhoc/bootstrap-fedora.yml @@ -1,4 +1,4 @@ -- hosts: OSv3 +- hosts: OSEv3 gather_facts: false tasks: - name: install python and deps for ansible modules diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 4f0ef7a75..347d9f574 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -1,20 +1,21 @@ --- -#example run: +#example run: # ansible-playbook -e "cli_volume_size=1" \ # -e "cli_device_name=/dev/xvdf" \ # -e "cli_hosttype=master" \ -# -e "cli_environment=ops" \ +# -e "cli_clusterid=ops" \ # create_pv.yaml -# FIXME: we need to change "environment" to "clusterid" as that's what it really is now. # - name: Create a volume and attach it to master hosts: localhost + connection: local + become: no gather_facts: no vars: cli_volume_type: gp2 cli_volume_iops: '' oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] | - intersect(groups['tag_environment_' ~ cli_environment]) | + intersect(groups['oo_clusterid_' ~ cli_clusterid]) | first }}" pre_tasks: - fail: @@ -24,7 +25,7 @@ - cli_volume_size - cli_device_name - cli_hosttype - - cli_environment + - cli_clusterid - name: set oo_name fact set_fact: @@ -55,7 +56,7 @@ args: tags: Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}" - env: "{{cli_environment}}" + clusterid: "{{cli_clusterid}}" register: voltags - debug: var=voltags @@ -103,7 +104,7 @@ filesystem: dev: "{{ cli_device_name }}" fstype: ext4 - + - name: Mount the dev mount: name: "{{ pv_mntdir }}" @@ -112,7 +113,7 @@ state: mounted - name: chgrp g+rwXs - file: + file: path: "{{ pv_mntdir }}" mode: 'g+rwXs' recurse: yes @@ -154,6 +155,6 @@ - debug: var=oc_output - - fail: + - fail: msg: "Failed to add {{ pv_template }} to master." 
when: oc_output.rc != 0
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index b6a2d2f26..4d32fc40b 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -113,7 +113,7 @@
     args:
       tags:
         Name: "{{ ec2_tag_Name }}"
-        env: "{{ ec2_tag_environment }}"
+        clusterid: "{{ ec2_tag_clusterid }}"
     register: voltags

   - name: Wait for volume to attach
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index 63d473146..d24e9cafa 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -20,7 +20,7 @@
 #  ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
 #
 #  Notes:
-#  * By default this will do a 55GB GP2 volume.  The can be overidden with the "-e 'cli_volume_size=100'" variable
+#  * By default this will do a 200GB GP2 volume.  This can be overridden with the "-e 'cli_volume_size=100'" variable
 #  * This does a GP2 by default.  Support for Provisioned IOPS has not been added
 #  * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
 #  * This can be done with NO downtime on the host
@@ -36,7 +36,7 @@

   vars:
     cli_volume_type: gp2
-    cli_volume_size: 55
+    cli_volume_size: 200
 #    cli_volume_iops: "{{ 30 * cli_volume_size }}"

   pre_tasks:
@@ -151,7 +151,7 @@
     args:
       tags:
         Name: "{{ ec2_tag_Name }}"
-        env: "{{ ec2_tag_environment }}"
+        clusterid: "{{ ec2_tag_clusterid }}"
     register: voltags

   - name: check for attached drive
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
index d250e6e69..2d2cae2b5 100644
--- a/playbooks/adhoc/noc/create_host.yml
+++ b/playbooks/adhoc/noc/create_host.yml
@@ -1,6 +1,8 @@
 ---
 - name: 'Create a host object in zabbix'
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   roles:
     - os_zabbix
@@ -23,6 +25,8 @@
 #ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
 - name: 'Create a host object in zabbix'
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   roles:
     - os_zabbix
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
index c0ec57ce1..8ad5fa0e2 100644
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ b/playbooks/adhoc/noc/create_maintenance.yml
@@ -2,6 +2,8 @@
 #ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
 - name: 'Create a maintenance object in zabbix'
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   roles:
     - os_zabbix
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
index 4b94fa228..79cae24ab 100644
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ b/playbooks/adhoc/noc/get_zabbix_problems.yml
@@ -1,6 +1,8 @@
 ---
 - name: 'Get current hosts who have triggers that are alerting by trigger description'
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   roles:
     - os_zabbix
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 4dcef1a42..d409b4086 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -6,7 +6,7 @@
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.

-- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
+- hosts: tag_clusterid_{{ clusterid }}:&tag_host-type_openshift-master
   remote_user: root
   gather_facts: False
diff --git a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
new file mode 100755
index 000000000..0dc021fbc
--- /dev/null
+++ b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
@@ -0,0 +1,53 @@
+#!/usr/bin/ansible-playbook
+---
+#example run:
+# ansible-playbook -e "host=ops-node-compute-abcde" oo-sdn-restart.yml
+#
+
+- name: Check vars
+  hosts: localhost
+  gather_facts: false
+
+  pre_tasks:
+  - fail:
+      msg: "Playbook requires host to be set"
+    when: host is not defined or host == ''
+
+- name: Restart openshift/docker (and monitoring containers)
+  hosts: oo_version_3:&oo_name_{{ host }}
+  gather_facts: false
+  user: root
+
+  tasks:
+  - name: stop openshift/docker
+    service:
+      name: "{{ item }}"
+      state: stopped
+    with_items:
+    - atomic-openshift-node
+    - docker
+
+  - name: restart openvswitch
+    service:
+      name: openvswitch
+      state: restarted
+
+  - name: wait 5 sec
+    pause:
+      seconds: 5
+
+  - name: start openshift/docker
+    service:
+      name: "{{ item }}"
+      state: started
+    with_items:
+    - atomic-openshift-node
+    - docker
+
+  - name: restart monitoring containers
+    service:
+      name: "{{ item }}"
+      state: restarted
+    with_items:
+    - oso-f22-host-monitoring
+    - oso-rhel7-zagg-client
diff --git a/playbooks/adhoc/setupnfs.yml b/playbooks/adhoc/setupnfs.yml
new file mode 100644
index 000000000..5f3631fcf
--- /dev/null
+++ b/playbooks/adhoc/setupnfs.yml
@@ -0,0 +1,21 @@
+---
+### This playbook is old and we are currently not using NFS.
+- hosts: tag_Name_nfs-v3-stg + sudo: no + remote_user: root + gather_facts: no + roles: + - role: openshift_storage_nfs_lvm + mount_dir: /exports/stg-black + volume_prefix: "kwoodsontest" + volume_size: 5 + volume_num_start: 222 + number_of_volumes: 3 + tasks: + - fetch: + dest: json/ + src: /root/"{{ item }}" + with_items: + - persistent-volume.kwoodsontest5g0222.json + - persistent-volume.kwoodsontest5g0223.json + - persistent-volume.kwoodsontest5g0224.json diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 08a2ea6fb..36d686c8b 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -19,15 +19,19 @@ failed_when: false register: ostree_output + # Since we're not calling openshift_facts we'll do this for now - set_fact: is_atomic: "{{ ostree_output.rc == 0 }}" + - set_fact: + is_containerized: "{{ is_atomic or containerized | default(false) | bool }}" - name: Remove br0 interface shell: ovs-vsctl del-br br0 changed_when: False failed_when: False - - service: name={{ item }} state=stopped + - name: Stop services + service: name={{ item }} state=stopped with_items: - atomic-enterprise-master - atomic-enterprise-node @@ -36,6 +40,7 @@ - atomic-openshift-master-controllers - atomic-openshift-node - etcd + - haproxy - openshift-master - openshift-master-api - openshift-master-controllers @@ -46,41 +51,11 @@ - origin-master-controllers - origin-node - pcsd + failed_when: false - - yum: name={{ item }} state=absent - when: ansible_pkg_mgr == "yum" and not is_atomic | bool - with_items: - - atomic-enterprise - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - atomic-openshift - - atomic-openshift-clients - - atomic-openshift-master - - atomic-openshift-node - - atomic-openshift-sdn-ovs - - corosync - - etcd - - openshift - - openshift-master - - openshift-node - - openshift-sdn - - openshift-sdn-ovs - - openvswitch - - origin - - origin-clients - - origin-master - - origin-node - - origin-sdn-ovs - - pacemaker - - pcs - - tuned-profiles-atomic-enterprise-node - - tuned-profiles-atomic-openshift-node - - tuned-profiles-openshift-node - - tuned-profiles-origin-node - - - dnf: name={{ item }} state=absent - when: ansible_pkg_mgr == "dnf" and not is_atomic | bool + - name: Remove packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" + when: not is_atomic | bool with_items: - atomic-enterprise - atomic-enterprise-master @@ -93,6 +68,7 @@ - atomic-openshift-sdn-ovs - corosync - etcd + - haproxy - openshift - openshift-master - openshift-node @@ -164,14 +140,26 @@ with_items: - registry\.access\..*redhat\.com/openshift3 - registry\.access\..*redhat\.com/aep3 + - registry\.access\..*redhat\.com/rhel7/etcd - docker.io/openshift - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" changed_when: False failed_when: False with_items: "{{ images_to_delete.results }}" + + - name: Remove sdn drop files + file: + path: /run/openshift-sdn + state: absent + + - name: restart docker + service: + name: docker + state: restarted - - file: path={{ item }} state=absent + - name: Remove remaining files + file: path={{ item }} state=absent with_items: - "~{{ ansible_ssh_user }}/.kube" - /etc/ansible/facts.d/openshift.fact @@ -181,7 +169,15 @@ - /etc/openshift - /etc/openshift-sdn - /etc/origin + - /etc/systemd/system/atomic-openshift-master.service + - /etc/systemd/system/atomic-openshift-master-api.service + - /etc/systemd/system/atomic-openshift-master-controllers.service + - 
/etc/systemd/system/atomic-openshift-node.service + - /etc/systemd/system/etcd_container.service + - /etc/systemd/system/openvswitch.service - /etc/sysconfig/atomic-enterprise-master + - /etc/sysconfig/atomic-enterprise-master-api + - /etc/sysconfig/atomic-enterprise-master-controllers - /etc/sysconfig/atomic-enterprise-node - /etc/sysconfig/atomic-openshift-master - /etc/sysconfig/atomic-openshift-master-api @@ -206,6 +202,10 @@ - /usr/lib/systemd/system/atomic-openshift-master-controllers.service - /usr/lib/systemd/system/origin-master-api.service - /usr/lib/systemd/system/origin-master-controllers.service + - /usr/local/bin/openshift + - /usr/local/bin/oadm + - /usr/local/bin/oc + - /usr/local/bin/kubectl # Since we are potentially removing the systemd unit files for separated # master-api and master-controllers services, so we need to reload the diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml index 1e884240a..09f7c76cc 100644 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -1,6 +1,8 @@ --- - hosts: localhost gather_facts: no + connection: local + become: no vars: g_server: http://localhost:8080/zabbix/api_jsonrpc.php g_user: '' diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml index e2b8150c6..ec28564cf 100755 --- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml +++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml @@ -2,6 +2,8 @@ --- - hosts: localhost gather_facts: no + connection: local + become: no vars: g_server: http://localhost/zabbix/api_jsonrpc.php g_user: Admin diff --git a/playbooks/aws/ansible-tower/config.yml b/playbooks/aws/ansible-tower/config.yml index efd1b9911..eb3f1a1da 100644 --- a/playbooks/aws/ansible-tower/config.yml +++ b/playbooks/aws/ansible-tower/config.yml @@ -2,6 +2,8 @@ - name: "populate oo_hosts_to_config host group if needed" hosts: localhost gather_facts: no + connection: local + become: no tasks: - name: Evaluate oo_host_group_exp if it's set add_host: "name={{ item }} groups=oo_hosts_to_config" diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml index 850238ffb..d40529435 100644 --- a/playbooks/aws/ansible-tower/launch.yml +++ b/playbooks/aws/ansible-tower/launch.yml @@ -2,6 +2,7 @@ - name: Launch instance(s) hosts: localhost connection: local + become: no gather_facts: no vars: @@ -71,8 +72,8 @@ tasks: - - name: Yum update - yum: name=* state=latest + - name: Update All Things + action: "{{ ansible_pkg_mgr }} name=* state=latest" # Apply the configs, seprate so that just the configs can be run by themselves - include: config.yml diff --git a/playbooks/aws/openshift-cluster/addNodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml index fff3e401b..3d88e6b23 100644 --- a/playbooks/aws/openshift-cluster/addNodes.yml +++ b/playbooks/aws/openshift-cluster/add_nodes.yml @@ -2,6 +2,7 @@ - name: Launch instance(s) hosts: localhost connection: local + become: no gather_facts: no vars_files: - vars.yml diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml new file mode 100644 index 000000000..1023f3ec1 --- /dev/null +++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml @@ -0,0 +1,17 @@ +--- +g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([]) + | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}" + +g_etcd_hosts: "{{ g_all_hosts | 
intersect(groups['tag_host-type_etcd'] | default([])) }}" + +g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}" + +g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}" + +g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}" + +g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}" + +g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra']) | default([]) }}" + +g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute']) | default([]) }}" diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index 5aa6b0f9b..abdb23d78 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -1,24 +1,14 @@ --- -- hosts: localhost - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: - g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - - include: ../../common/openshift-cluster/config.yml + vars_files: + - ../../aws/openshift-cluster/vars.yml + - ../../aws/openshift-cluster/cluster_hosts.yml vars: - g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" - g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" - g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" + g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo: "{{ deployment_vars[deployment_type].sudo }}" g_nodeonmaster: true openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 2 + openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 09bf34666..15b83dfad 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -2,6 +2,7 @@ - name: Launch instance(s) hosts: localhost connection: local + become: no gather_facts: no vars_files: - vars.yml diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index 04fcdc0a1..8b41a355e 100644 --- a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -2,10 +2,12 @@ - name: Generate oo_list_hosts group hosts: localhost gather_facts: no + connection: local + become: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env_{{ cluster_id }} + - set_fact: scratch_group=tag_clusterid_{{ cluster_id }} when: cluster_id != '' - set_fact: scratch_group=all when: cluster_id == '' diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml index 4415700a3..c2135cd03 100644 --- a/playbooks/aws/openshift-cluster/scaleup.yml +++ b/playbooks/aws/openshift-cluster/scaleup.yml @@ -2,12 +2,11 @@ - hosts: localhost gather_facts: no + connection: local + become: no vars_files: - vars.yml tasks: - - set_fact: - g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - name: Evaluate oo_hosts_to_update 
add_host: name: "{{ item }}" @@ -19,16 +18,16 @@ - include: ../../common/openshift-cluster/update_repos_and_packages.yml - include: ../../common/openshift-cluster/scaleup.yml + vars_files: + - ../../aws/openshift-cluster/vars.yml + - ../../aws/openshift-cluster/cluster_hosts.yml vars: - g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" - g_new_nodes_group: 'nodes_to_add' - g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" - g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" + g_new_node_hosts: "{{ groups.nodes_to_add }}" + g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo: "{{ deployment_vars[deployment_type].sudo }}" g_nodeonmaster: true openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 2 + openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml index 25cf48505..d5f7d6b19 100644 --- a/playbooks/aws/openshift-cluster/service.yml +++ b/playbooks/aws/openshift-cluster/service.yml @@ -1,9 +1,12 @@ --- - name: Call same systemctl command for openshift on all instance(s) hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml + - cluster_hosts.yml tasks: - fail: msg="cluster_id is required to be injected in this playbook" when: cluster_id is not defined @@ -14,7 +17,7 @@ groups: g_service_masters ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]) + with_items: "{{ master_hosts | default([]) }}" - name: Evaluate g_service_nodes add_host: @@ -22,7 +25,7 @@ groups: g_service_nodes ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]) + with_items: "{{ node_hosts | default([]) }}" - include: ../../common/openshift-node/service.yml - include: ../../common/openshift-master/service.yml diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 99f0577fc..6090ed6fe 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -2,8 +2,8 @@ - set_fact: created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}" - env: "{{ cluster }}" - env_host_type: "{{ cluster }}-openshift-{{ type }}" + cluster: "{{ cluster_id }}" + env: "{{ cluster_env }}" host_type: "{{ type }}" sub_host_type: "{{ g_sub_host_type }}" @@ -124,10 +124,9 @@ wait: yes instance_tags: created-by: "{{ created_by }}" - environment: "{{ env }}" - env: "{{ env }}" + clusterid: "{{ cluster }}" + environment: "{{ cluster_env }}" host-type: "{{ host_type }}" - env-host-type: "{{ env_host_type }}" sub-host-type: "{{ sub_host_type }}" volumes: "{{ volumes }}" register: ec2 @@ -142,9 +141,8 @@ Name: "{{ item.0 }}" - set_fact: - instance_groups: "tag_created-by_{{ created_by }}, 
tag_env_{{ env }}, - tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, - tag_sub-host-type_{{ sub_host_type }}" + instance_groups: "tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, tag_environment_{{ cluster_env }}, + tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}" - set_fact: node_label: diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 77287cad0..4b9c80b14 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -1,17 +1,18 @@ --- - name: Terminate instance(s) hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env_{{ cluster_id }} - add_host: name: "{{ item }}" groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) + with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -25,36 +26,37 @@ - name: Terminate instances hosts: localhost connection: local + become: no gather_facts: no - vars: - host_vars: "{{ hostvars - | oo_select_keys(groups['oo_hosts_to_terminate']) }}" tasks: - name: Remove tags from instances - ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent - args: + ec2_tag: + resource: "{{ hostvars[item]['ec2_id'] }}" + region: "{{ hostvars[item]['ec2_region'] }}" + state: absent tags: - env: "{{ item['ec2_tag_env'] }}" - host-type: "{{ item['ec2_tag_host-type'] }}" - env-host-type: "{{ item['ec2_tag_env-host-type'] }}" - sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}" - with_items: host_vars + environment: "{{ hostvars[item]['ec2_tag_environment'] }}" + clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}" + host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}" + sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}" + with_items: groups.oo_hosts_to_terminate when: "'oo_hosts_to_terminate' in groups" - name: Terminate instances ec2: state: absent - instance_ids: ["{{ item.ec2_id }}"] - region: "{{ item.ec2_region }}" + instance_ids: ["{{ hostvars[item].ec2_id }}"] + region: "{{ hostvars[item].ec2_region }}" ignore_errors: yes register: ec2_term - with_items: host_vars + with_items: groups.oo_hosts_to_terminate when: "'oo_hosts_to_terminate' in groups" # Fail if any of the instances failed to terminate with an error other # than 403 Forbidden - - fail: msg=Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }} - when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + - fail: + msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}" + when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" with_items: ec2_term.results - name: Stop instance if termination failed @@ -63,7 +65,7 @@ instance_ids: ["{{ item.item.ec2_id }}"] region: "{{ item.item.ec2_region }}" register: ec2_stop - when: "'oo_hosts_to_terminate' in groups and item.failed" + when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" with_items: ec2_term.results - name: Rename stopped instances diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml 
index e006aa74a..32bab76b5 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -1,19 +1,20 @@ --- -- name: Populate oo_hosts_to_update group +- name: Update - Populate oo_hosts_to_update group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml + - cluster_hosts.yml tasks: - - name: Evaluate oo_hosts_to_update + - name: Update - Evaluate oo_hosts_to_update add_host: name: "{{ item }}" groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])) - | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])) - | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([])) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 8cad51b5e..11026e38d 100644 --- a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -2,32 +2,16 @@ # This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type. # Usage: # ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id> -- hosts: localhost - gather_facts: no - vars_files: - - ../../vars.yml - - "../../vars.{{ deployment_type }}.{{ cluster_id }}.yml" - - tasks: - - set_fact: - g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - - - set_fact: - tmp_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" - when: deployment_type != 'online' - - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml + vars_files: + - "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}" + - "{{lookup('file', '../../../../aws/openshift-cluster/cluster_hosts.yml')}}" vars: - g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ tmp_nodes_group | default('') }}" - g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" - g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" + g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo: "{{ deployment_vars[deployment_type].sudo }}" g_nodeonmaster: true openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 2 + openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index 95bc4b3e2..ae12286bd 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -1,8 +1,23 @@ --- +debug_level: 2 + +deployment_rhel7_ent_base: + # rhel-7.1, requires cloud access subscription + image: ami-10663b78 + image_name: + region: us-east-1 + ssh_user: ec2-user + sudo: yes + keypair: libra + type: 
m4.large + security_groups: [ 'public' ] + vpc_subnet: + assign_public_ip: + deployment_vars: origin: # centos-7, requires marketplace - image: ami-96a818fe + image: ami-61bbf104 image_name: region: us-east-1 ssh_user: centos @@ -24,15 +39,6 @@ deployment_vars: security_groups: [ 'public' ] vpc_subnet: assign_public_ip: - enterprise: - # rhel-7.1, requires cloud access subscription - image: ami-10663b78 - image_name: - region: us-east-1 - ssh_user: ec2-user - sudo: yes - keypair: libra - type: m4.large - security_groups: [ 'public' ] - vpc_subnet: - assign_public_ip: + enterprise: "{{ deployment_rhel7_ent_base }}" + openshift-enterprise: "{{ deployment_rhel7_ent_base }}" + atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml new file mode 100644 index 000000000..e093b2580 --- /dev/null +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -0,0 +1,13 @@ +--- +g_etcd_hosts: "{{ groups.etcd | default([]) }}" + +g_lb_hosts: "{{ groups.lb | default([]) }}" + +g_master_hosts: "{{ groups.masters | default([]) }}" + +g_node_hosts: "{{ groups.nodes | default([]) }}" + +g_nfs_hosts: "{{ groups.nfs | default([]) }}" + +g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) + | union(g_lb_hosts) | default([]) }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 411c7e660..5887b3208 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -1,10 +1,8 @@ --- - include: ../../common/openshift-cluster/config.yml + vars_files: + - ../../byo/openshift-cluster/cluster_hosts.yml vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_nodes_group: "{{ 'nodes' }}" - g_lb_group: "{{ 'lb' }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: 2 + openshift_debug_level: "{{ debug_level | default(2) }}" openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/scaleup.yml b/playbooks/byo/openshift-cluster/scaleup.yml index 70644d427..1702690f6 100644 --- a/playbooks/byo/openshift-cluster/scaleup.yml +++ b/playbooks/byo/openshift-cluster/scaleup.yml @@ -1,10 +1,8 @@ --- - include: ../../common/openshift-cluster/scaleup.yml + vars_files: + - ../../byo/openshift-cluster/cluster_hosts.yml vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_new_nodes_group: "{{ 'new_nodes' }}" - g_lb_group: "{{ 'lb' }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: 2 + openshift_debug_level: "{{ debug_level | default(2) }}" openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md index ce7aebf8e..ca01dbc9d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -1,6 +1,6 @@ # Upgrade playbooks The playbooks provided in this directory can be used for upgrading an existing -environment. Additional notes for the associated upgrade playbooks are +cluster. Additional notes for the associated upgrade playbooks are provided in their respective directories. 
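+For example, to run the 3.0.x to 3.1 upgrade against a BYO cluster (the
+inventory path below is illustrative, not one these playbooks require):
+
+```sh
+# Hypothetical inventory location; point -i at your own BYO inventory file.
+ansible-playbook -i ~/ansible-inventory \
+    playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+```
+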
# Upgrades available diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index 76fa9ba22..b52456dcd 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -1,9 +1,12 @@ --- - include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml + vars_files: + - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}" vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_nodes_group: "{{ 'nodes' }}" - g_lb_group: "{{ 'lb' }}" + g_etcd_hosts: "{{ groups.etcd | default([]) }}" + g_master_hosts: "{{ groups.masters | default([]) }}" + g_nfs_hosts: "{{ groups.nfs | default([]) }}" + g_node_hosts: "{{ groups.nodes | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index b06442366..e07e2b88e 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -1,9 +1,12 @@ --- - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml + vars_files: + - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}" vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_nodes_group: "{{ 'nodes' }}" - g_lb_group: "{{ 'lb' }}" + g_etcd_hosts: "{{ groups.etcd | default([]) }}" + g_master_hosts: "{{ groups.masters | default([]) }}" + g_nfs_hosts: "{{ groups.nfs | default([]) }}" + g_node_hosts: "{{ groups.nodes | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md new file mode 100644 index 000000000..b230835c3 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md @@ -0,0 +1,17 @@ +# v3.1 minor upgrade playbook +This upgrade will preserve all locally made configuration modifications to the +Masters and Nodes. + +## Overview +This playbook is available as a technical preview. It currently performs the +following steps. 
+ + * Upgrade and restart master services + * Upgrade and restart node services + * Apply the latest cluster policies + * Update the default router if one exists + * Update the default registry if one exists + * Update image streams and quickstarts + +## Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml new file mode 100644 index 000000000..20fa9b10f --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -0,0 +1,14 @@ +--- +- include: ../../../../common/openshift-cluster/evaluate_groups.yml + vars: + g_etcd_hosts: "{{ groups.etcd | default([]) }}" + g_master_hosts: "{{ groups.masters | default([]) }}" + g_nfs_hosts: "{{ groups.nfs | default([]) }}" + g_node_hosts: "{{ groups.nodes | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_deployment_type: "{{ deployment_type }}" +- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml +- include: ../../../openshift-master/restart.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/post.yml diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/byo/openshift-master/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/byo/openshift-master/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml new file mode 100644 index 000000000..a78a6aa3d --- /dev/null +++ b/playbooks/byo/openshift-master/restart.yml @@ -0,0 +1,4 @@ +--- +- include: ../../common/openshift-master/restart.yml + vars_files: + - ../../byo/openshift-cluster/cluster_hosts.yml diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-master/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 482fa8441..11e5b68f6 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,8 +1,12 @@ --- - include: evaluate_groups.yml +- include: ../openshift-docker/config.yml + - include: ../openshift-etcd/config.yml +- include: ../openshift-nfs/config.yml + - include: ../openshift-master/config.yml - include: ../openshift-node/config.yml diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 34da372a4..db7105ed5 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -1,23 +1,33 @@ --- - name: Populate config host groups hosts: localhost + connection: local + become: no gather_facts: no tasks: - fail: - msg: This playbook requires g_etcd_group to be set - when: g_etcd_group is not defined + msg: This playbook requires g_etcd_hosts to be set + when: g_etcd_hosts is not defined - fail: - msg: This playbook requires g_masters_group to be set - when: g_masters_group is not defined + msg: This playbook requires g_master_hosts to be set + when: g_master_hosts is not defined - fail: - msg: This playbook requires g_nodes_group or g_new_nodes_group to be set - when: g_nodes_group is not defined and g_new_nodes_group is not defined + msg: This playbook requires g_node_hosts or g_new_node_hosts to be set + when: g_node_hosts is not defined and g_new_node_hosts is not defined - fail: - msg: This playbook requires g_lb_group to be set - when: g_lb_group is not defined + msg: This playbook requires g_lb_hosts to be set + when: g_lb_hosts is not defined + + - fail: + msg: This playbook requires g_nfs_hosts to be set + when: g_nfs_hosts is not defined + + - fail: + msg: The nfs group must be limited to one host + when: g_nfs_hosts | default([]) | length > 1 - name: Evaluate oo_etcd_to_config add_host: @@ -25,7 +35,7 @@ groups: oo_etcd_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_etcd_group] | default([]) + with_items: "{{ g_etcd_hosts | default([]) }}" - name: Evaluate oo_masters_to_config add_host: @@ -33,11 +43,11 @@ groups: oo_masters_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) + with_items: "{{ g_master_hosts | default([]) }}" - # Use g_new_nodes_group if it exists otherwise g_nodes_group + # Use g_new_node_hosts if it exists, otherwise g_node_hosts - set_fact: - g_nodes_to_config: "{{ g_new_nodes_group | default(g_nodes_group | default([])) }}" + g_node_hosts_to_config: "{{ g_new_node_hosts | default(g_node_hosts | default([])) }}" - name: Evaluate oo_nodes_to_config add_host: @@ -45,32 +55,32 @@ groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_nodes_to_config] | default([]) + with_items: "{{ g_node_hosts_to_config | default([]) }}" - # Skip adding the master to oo_nodes_to_config when g_new_nodes_group is + # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is defined - name: Evaluate oo_nodes_to_config add_host: name: "{{ item }}" groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | 
default(omit) }}" - with_items: groups[g_masters_group] | default([]) - when: g_nodeonmaster | default(false) == true and g_new_nodes_group is not defined + with_items: "{{ g_master_hosts | default([]) }}" + when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined - name: Evaluate oo_first_etcd add_host: - name: "{{ groups[g_etcd_group][0] }}" + name: "{{ g_etcd_hosts[0] }}" groups: oo_first_etcd ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 + when: g_etcd_hosts|length > 0 - name: Evaluate oo_first_master add_host: - name: "{{ groups[g_masters_group][0] }}" + name: "{{ g_master_hosts[0] }}" groups: oo_first_master ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 + when: g_master_hosts|length > 0 - name: Evaluate oo_lb_to_config add_host: @@ -78,4 +88,12 @@ groups: oo_lb_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_lb_group] | default([]) + with_items: "{{ g_lb_hosts | default([]) }}" + + - name: Evaluate oo_nfs_to_config + add_host: + name: "{{ item }}" + groups: oo_nfs_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_nfs_hosts | default([]) }}" diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml index 190e2d862..88736ee03 100644 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -4,9 +4,10 @@ openshift_deployment_type: "{{ deployment_type }}" roles: - role: rhel_subscribe - when: deployment_type == "enterprise" and + when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] + and not openshift.common.is_atomic | bool - openshift_repos - os_update_latest diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh index c7c966b60..b46407ed7 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh +++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh @@ -1,9 +1,8 @@ #!/bin/bash -yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ') - -yum_available=$(yum list available -q "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ') +yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | tr '\n' ' ') +yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | tr '\n' ' ') echo "---" echo "curr_version: ${yum_installed}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index 9f7e49b93..63c8ef756 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml +++ 
b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -11,7 +11,7 @@ openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: - name: Upgrade master packages - yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest" - name: Restart master services service: name="{{ openshift.common.service_type}}-master" state=restarted @@ -21,7 +21,7 @@ openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: - name: Upgrade node packages - yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest" - name: Restart node services service: name="{{ openshift.common.service_type }}-node" state=restarted diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 0309e8a77..8ec379109 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -12,6 +12,8 @@ - name: Evaluate additional groups for upgrade hosts: localhost + connection: local + become: no tasks: - name: Evaluate etcd_hosts_to_backup add_host: @@ -27,6 +29,7 @@ hosts: oo_first_master vars: openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}" gather_facts: no tasks: # Pacemaker is currently the only supported upgrade path for multiple masters @@ -43,8 +46,8 @@ - fail: msg: > openshift_pkg_version is {{ openshift_pkg_version }} which is not a - valid version for a 3.1 upgrade - when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare('3.0.2.900','<') + valid version for a {{ target_version }} upgrade + when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<') # If this script errors out ansible will show the default stdout/stderr # which contains details for the user: @@ -53,9 +56,11 @@ - name: Verify upgrade can proceed hosts: oo_masters_to_config:oo_nodes_to_config + vars: + target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}" tasks: - - name: Clean yum cache - command: yum clean all + - name: Clean package cache + command: "{{ ansible_pkg_mgr }} clean all" - set_fact: g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" @@ -75,8 +80,8 @@ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<') - fail: - msg: Atomic OpenShift 3.1 packages not found - when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<')) + msg: Upgrade packages not found + when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<')) - set_fact: pre_upgrade_complete: True @@ -87,6 +92,8 @@ ############################################################################## - name: Gate on pre-upgrade checks hosts: localhost + connection: local + become: no vars: pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}" tasks: @@ -149,9 +156,7 @@ when: (embedded_etcd 
| bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) - name: Install etcd (for etcdctl) - yum: - pkg: etcd - state: latest + action: "{{ ansible_pkg_mgr }} name=etcd state=latest" - name: Generate etcd backup command: > @@ -171,6 +176,8 @@ ############################################################################## - name: Gate on etcd backup hosts: localhost + connection: local + become: no tasks: - set_fact: etcd_backup_completed: "{{ hostvars @@ -189,6 +196,8 @@ ############################################################################### - name: Create temp directory for syncing certs hosts: localhost + connection: local + become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -222,17 +231,14 @@ openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: - name: Upgrade to latest available kernel - yum: - pkg: kernel - state: latest + action: "{{ ansible_pkg_mgr}} name=kernel state=latest" - name: Upgrade master packages - command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }} + command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" - name: Ensure python-yaml present for config upgrade - yum: - pkg: PyYAML - state: installed + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" + when: not openshift.common.is_atomic | bool - name: Upgrade master configuration openshift_upgrade_config: @@ -242,7 +248,31 @@ config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" - set_fact: - master_certs_missing: True + openshift_master_certs_no_etcd: + - admin.crt + - master.kubelet-client.crt + - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}" + - master.server.crt + - openshift-master.crt + - openshift-registry.crt + - openshift-router.crt + - etcd.server.crt + openshift_master_certs_etcd: + - master.etcd-client.crt + + - set_fact: + openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}" + + - name: Check status of master certificates + stat: + path: "{{ openshift.common.config_base }}/master/{{ item }}" + with_items: openshift_master_certs + register: g_master_cert_stat_result + + - set_fact: + master_certs_missing: "{{ False in (g_master_cert_stat_result.results + | oo_collect(attribute='stat.exists') + | list ) }}" master_cert_subdir: master-{{ openshift.common.hostname }} master_cert_config_dir: "{{ openshift.common.config_base }}/master" @@ -256,8 +286,8 @@ | oo_flatten | unique }}" master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs" masters_needing_certs: "{{ hostvars - | oo_select_keys(groups.oo_masters_to_config) - | difference([groups.oo_first_master.0]) }}" + | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master'])) + | oo_filter_list(filter_attr='master_certs_missing') }}" sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" openshift_deployment_type: "{{ deployment_type }}" roles: @@ -339,6 +369,8 @@ - name: Delete temporary directory on localhost hosts: localhost + connection: local + become: no gather_facts: no tasks: - file: name={{ g_master_mktemp.stdout }} state=absent @@ -357,6 +389,8 @@ ############################################################################## - name: Gate on master update hosts: localhost + connection: local + 
become: no tasks: - set_fact: master_update_completed: "{{ hostvars @@ -380,7 +414,7 @@ - openshift_facts tasks: - name: Upgrade node packages - command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }} + command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" - name: Restart node service service: name="{{ openshift.common.service_type }}-node" state=restarted @@ -388,6 +422,24 @@ - name: Ensure node service enabled service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes + - name: Install Ceph storage plugin dependencies + action: "{{ ansible_pkg_mgr }} name=ceph-common state=present" + + - name: Install GlusterFS storage plugin dependencies + action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present" + + - name: Set sebooleans to allow gluster storage plugin access from containers + seboolean: + name: "{{ item }}" + state: yes + persistent: yes + when: ansible_selinux and ansible_selinux.status == "enabled" + with_items: + - virt_use_fusefs + - virt_sandbox_use_fusefs + register: sebool_result + failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg" + - set_fact: node_update_complete: True @@ -397,6 +449,8 @@ ############################################################################## - name: Gate on nodes update hosts: localhost + connection: local + become: no tasks: - set_fact: node_update_completed: "{{ hostvars @@ -464,6 +518,8 @@ ############################################################################## - name: Gate on reconcile hosts: localhost + connection: local + become: no tasks: - set_fact: reconcile_completed: "{{ hostvars diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins new file mode 120000 index 000000000..27ddaa18b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library new file mode 120000 index 000000000..53bed9684 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library @@ -0,0 +1 @@ +../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins new file mode 120000 index 000000000..cf407f69b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins @@ -0,0 +1 @@ +../../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml new file mode 100644 index 000000000..d8336fcae --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml @@ -0,0 +1,50 @@ +--- +############################################################################### +# Post upgrade - Upgrade default router, default registry and examples +############################################################################### +- name: Upgrade default router and default registry + hosts: oo_first_master + vars: + openshift_deployment_type: "{{ deployment_type }}" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}" + oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + roles: + # Create the new templates shipped in 3.1.z; existing templates are left + # unmodified. This prevents the subsequent role definition for + # openshift_examples from failing when trying to replace templates that do + # not already exist. We could have potentially done a replace --force to + # create and update in one step. + - openshift_examples + # Update the existing templates + - role: openshift_examples + openshift_examples_import_command: replace + pre_tasks: + - name: Check for default router + command: > + {{ oc_cmd }} get -n default dc/router + register: _default_router + failed_when: false + changed_when: false + + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + + - name: Update router image to current version + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + --api-version=v1 + + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + --api-version=v1 + diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml new file mode 100644 index 000000000..91780de09 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml @@ -0,0 +1,87 @@ +--- +############################################################################### +# Evaluate host groups and gather facts +############################################################################### +- name: Load openshift_facts + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_facts + +############################################################################### +# Pre-upgrade checks +############################################################################### +- name: Verify upgrade can proceed + hosts: oo_first_master + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" + gather_facts: no + tasks: + - fail: + msg: > + This upgrade is 
only supported for origin, openshift-enterprise, and online + deployment types + when: deployment_type not in ['origin','openshift-enterprise', 'online'] + + - fail: + msg: > + openshift_pkg_version is {{ openshift_pkg_version }} which is not a + valid version for a {{ target_version }} upgrade + when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<') + +- name: Verify upgrade can proceed + hosts: oo_masters_to_config:oo_nodes_to_config + vars: + target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" + tasks: + - name: Clean package cache + command: "{{ ansible_pkg_mgr }} clean all" + + - set_fact: + g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" + + - name: Determine available versions + script: ../files/versions.sh {{ g_new_service_name }} openshift + register: g_versions_result + + - set_fact: + g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}" + + - set_fact: + g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}" + + - fail: + msg: This playbook requires Origin 1.1 or later + when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<') + + - fail: + msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later + when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<') + + - fail: + msg: Upgrade packages not found + when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<')) + + - set_fact: + pre_upgrade_complete: True + + +############################################################################## +# Gate on pre-upgrade checks +############################################################################## +- name: Gate on pre-upgrade checks + hosts: localhost + connection: local + become: no + vars: + pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}" + tasks: + - set_fact: + pre_upgrade_completed: "{{ hostvars + | oo_select_keys(pre_upgrade_hosts) + | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}" + - set_fact: + pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}" + when: pre_upgrade_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml new file mode 100644 index 000000000..81dbba1e3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -0,0 +1,137 @@ +--- +############################################################################### +# The restart playbook should be run after this playbook completes. +############################################################################### + +############################################################################### +# Upgrade Masters +############################################################################### +- name: Upgrade master packages and configuration + hosts: oo_masters_to_config + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade master packages + command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" + + - name: Ensure python-yaml present for config upgrade + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" + when: not openshift.common.is_atomic | bool + +# Currently 3.1.1 does not have any new configuration settings +# +# - name: Upgrade master configuration +# openshift_upgrade_config: +# from_version: '3.0' +# to_version: '3.1' +# role: master +# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" + +- name: Set master update status to complete + hosts: oo_masters_to_config + tasks: + - set_fact: + master_update_complete: True + +############################################################################## +# Gate on master update complete +############################################################################## +- name: Gate on master update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + master_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" + - set_fact: + master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. 
The following masters did not finish updating: {{ master_update_failed | join(',') }}" + when: master_update_failed | length > 0 + +############################################################################### +# Upgrade Nodes +############################################################################### +- name: Upgrade nodes + hosts: oo_nodes_to_config + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + roles: + - openshift_facts + tasks: + - name: Upgrade node packages + command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" + + - name: Restart node service + service: name="{{ openshift.common.service_type }}-node" state=restarted + + - set_fact: + node_update_complete: True + +############################################################################## +# Gate on nodes update +############################################################################## +- name: Gate on nodes update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + node_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_nodes_to_config) + | oo_collect('inventory_hostname', {'node_update_complete': true}) }}" + - set_fact: + node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}" + when: node_update_failed | length > 0 + +############################################################################### +# Reconcile Cluster Roles and Cluster Role Bindings +############################################################################### +- name: Reconcile Cluster Roles and Cluster Role Bindings + hosts: oo_masters_to_config + vars: + origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}" + ent_reconcile_bindings: true + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + tasks: + - name: Reconcile Cluster Roles + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --confirm + run_once: true + + - name: Reconcile Cluster Role Bindings + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + run_once: true + + - set_fact: + reconcile_complete: True + +############################################################################## +# Gate on reconcile +############################################################################## +- name: Gate on reconcile + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + reconcile_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" + - set_fact: + reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" + - fail: + msg: "Upgrade cannot continue. 
The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" + when: reconcile_failed | length > 0 diff --git a/playbooks/common/openshift-docker/config.yml b/playbooks/common/openshift-docker/config.yml new file mode 100644 index 000000000..092d5533c --- /dev/null +++ b/playbooks/common/openshift-docker/config.yml @@ -0,0 +1,9 @@ +- name: Configure docker hosts + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + vars: + docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" + docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" + docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}" + roles: + - openshift_facts + - openshift_docker diff --git a/playbooks/common/openshift-docker/filter_plugins b/playbooks/common/openshift-docker/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-docker/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-docker/lookup_plugins b/playbooks/common/openshift-docker/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-docker/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-docker/roles b/playbooks/common/openshift-docker/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/common/openshift-docker/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 7d94ced2e..9a5ae0e6b 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -14,7 +14,8 @@ public_hostname: "{{ openshift_public_hostname | default(None) }}" deployment_type: "{{ openshift_deployment_type }}" - role: etcd - local_facts: {} + local_facts: + etcd_image: "{{ osm_etcd_image | default(None) }}" - name: Check status of etcd certificates stat: path: "{{ item }}" @@ -33,7 +34,7 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -88,11 +89,12 @@ roles: - etcd - role: nickhammond.logrotate + when: not openshift.common.is_containerized | bool - name: Delete temporary directory on localhost hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - file: name={{ g_etcd_mktemp.stdout }} state=absent diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml index 0bf69b22f..fd2bc24ae 100644 --- a/playbooks/common/openshift-etcd/service.yml +++ b/playbooks/common/openshift-etcd/service.yml @@ -1,6 +1,8 @@ --- - name: Populate g_service_masters host group if needed hosts: localhost + connection: local + become: no gather_facts: no tasks: - fail: msg="new_cluster_state is required to be injected in this playbook" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 71c3ccb46..70e6ce0b4 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -43,6 +43,7 @@ api_port: "{{ openshift_master_api_port | default(None) }}" api_url: "{{ openshift_master_api_url | default(None) }}" api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" + controllers_port: "{{ openshift_master_controllers_port | default(None) }}" public_api_url: "{{ openshift_master_public_api_url | default(None) }}" cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" @@ -51,6 +52,7 @@ console_url: "{{ openshift_master_console_url | default(None) }}" console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + portal_net: "{{ openshift_master_portal_net | default(None) }}" - name: Check status of external etcd certificates stat: path: "{{ openshift.common.config_base }}/master/{{ item }}" @@ -70,7 +72,7 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -84,6 +86,7 @@ etcd_generated_certs_dir: /etc/etcd/generated_certs etcd_needing_client_certs: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) + | default([]) | oo_filter_list(filter_attr='etcd_client_certs_missing') }}" sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" roles: @@ -207,7 +210,7 @@ - name: Compute haproxy_backend_servers hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - set_fact: @@ -217,6 +220,7 @@ hosts: oo_lb_to_config vars: sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + haproxy_frontend_port: "{{ 
hostvars[groups.oo_first_master.0].openshift.master.api_port }}" haproxy_frontends: - name: atomic-openshift-api mode: tcp @@ -232,42 +236,41 @@ balance: source servers: "{{ hostvars.localhost.haproxy_backend_servers }}" roles: + - role: openshift_facts - role: haproxy when: groups.oo_masters_to_config | length > 1 -- name: Generate master session keys +- name: Check for cached session secrets + hosts: oo_first_master + roles: + - role: openshift_facts + post_tasks: + - openshift_facts: + role: master + local_facts: + session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" + session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" + +- name: Generate master session secrets hosts: oo_first_master + vars: + g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([]) and openshift.master.session_encryption_secrets | default([])) | length > 0 }}" + g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}" + g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}" + roles: + - role: openshift_facts tasks: - - fail: - msg: "Both openshift_master_session_auth_secrets and openshift_master_session_encryption_secrets must be provided if either variable is set" - when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined) - - fail: - msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length" - when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length) - - name: Install OpenSSL package - action: "{{ansible_pkg_mgr}} pkg=openssl state=present" - - name: Generate session authentication key - command: /usr/bin/openssl rand -base64 24 - register: session_auth_output - with_sequence: count=1 - when: openshift_master_session_auth_secrets is undefined - - name: Generate session encryption key - command: /usr/bin/openssl rand -base64 24 - register: session_encryption_output - with_sequence: count=1 - when: openshift_master_session_encryption_secrets is undefined - - set_fact: - session_auth_secret: "{{ openshift_master_session_auth_secrets - | default(session_auth_output.results - | oo_collect(attribute='stdout') - | list) }}" - session_encryption_secret: "{{ openshift_master_session_encryption_secrets - | default(session_encryption_output.results - | oo_collect(attribute='stdout') - | list) }}" + - openshift_facts: + role: master + local_facts: + session_auth_secrets: "{{ g_session_auth_secrets }}" + session_encryption_secrets: "{{ g_session_encryption_secrets }}" + when: not g_session_secrets_present | bool - name: Parse named certificates hosts: localhost + connection: local + become: no vars: internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}" named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}" @@ -313,13 +316,14 @@ - name: Configure master instances hosts: oo_masters_to_config + any_errors_fatal: true serial: 1 vars: sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" openshift_master_ha: "{{ 
groups.oo_masters_to_config | length > 1 }}" openshift_master_count: "{{ groups.oo_masters_to_config | length }}" - openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}" - openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}" + openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" + openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" pre_tasks: - name: Ensure certificate directory exists file: @@ -334,6 +338,7 @@ roles: - openshift_master - role: nickhammond.logrotate + when: not openshift.common.is_containerized | bool - role: fluentd_master when: openshift.common.use_fluentd | bool - role: nuage_master @@ -351,7 +356,8 @@ roles: - role: openshift_master_cluster when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - - openshift_examples + - role: openshift_examples + when: openshift.common.install_examples | bool - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool - role: openshift_manageiq @@ -363,7 +369,7 @@ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" roles: - role: cockpit - when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and + when: not openshift.common.is_containerized and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and (osm_use_cockpit | bool or osm_use_cockpit is undefined ) - name: Configure flannel @@ -384,7 +390,7 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - file: name={{ g_master_mktemp.stdout }} state=absent @@ -401,7 +407,15 @@ - name: Create services hosts: oo_first_master + vars: + attach_registry_volume: "{{ groups.oo_nfs_to_config | length > 0 }}" + pre_tasks: + - set_fact: + nfs_host: "{{ groups.oo_nfs_to_config.0 }}" + registry_volume_path: "{{ hostvars[groups.oo_nfs_to_config.0].openshift.nfs.exports_dir + '/' + hostvars[groups.oo_nfs_to_config.0].openshift.nfs.registry_volume }}" + when: attach_registry_volume | bool roles: - role: openshift_router when: openshift.master.infra_nodes is defined - #- role: openshift_registry + - role: openshift_registry + when: openshift.master.infra_nodes is defined and attach_registry_volume | bool diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml new file mode 100644 index 000000000..02449e40d --- /dev/null +++ b/playbooks/common/openshift-master/restart.yml @@ -0,0 +1,151 @@ +--- +- include: ../openshift-cluster/evaluate_groups.yml + +- name: Validate configuration for rolling restart + hosts: oo_masters_to_config + roles: + - openshift_facts + tasks: + - fail: + msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'" + when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"] + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" + - role: master + local_facts: + cluster_method: "{{ openshift_master_cluster_method | default(None) }}" + +# Creating a temp file on localhost, we then check each system that will +# be rebooted to see if 
that file exists; if it does, we know we're running +# ansible on a machine that needs a reboot, and we need to error out. +- name: Create temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - local_action: command mktemp + register: mktemp + changed_when: false + +- name: Check if temp file exists on any masters + hosts: oo_masters_to_config + tasks: + - stat: path="{{ hostvars.localhost.mktemp.stdout }}" + register: exists + changed_when: false + +- name: Cleanup temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent + changed_when: false + +- name: Warn if restarting the system where ansible is running + hosts: oo_masters_to_config + tasks: + - pause: + prompt: > + Warning: Running playbook from a host that will be restarted! + Press CTRL+C and A to abort playbook execution. You may + continue by pressing ENTER but the playbook will stop + executing after this system has been restarted and services + must be verified manually. To only restart services, set + openshift_master_rolling_restart_mode=services in host + inventory and relaunch the playbook. + when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' + - set_fact: + current_host: "{{ exists.stat.exists }}" + when: openshift.common.rolling_restart_mode == 'system' + +- name: Determine which masters are currently active + hosts: oo_masters_to_config + any_errors_fatal: true + tasks: + - name: Check master service status + command: > + systemctl is-active {{ openshift.common.service_type }}-master + register: active_check_output + when: openshift.master.cluster_method | default(None) == 'pacemaker' + failed_when: false + changed_when: false + - set_fact: + is_active: "{{ active_check_output.stdout == 'active' }}" + when: openshift.master.cluster_method | default(None) == 'pacemaker' + +- name: Evaluate master groups + hosts: localhost + become: no + tasks: + - fail: + msg: > + Did not receive active status from any masters. Please verify pacemaker cluster. + when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('is_active') + | list) }}" + - name: Evaluate oo_active_masters + add_host: + name: "{{ item }}" + groups: oo_active_masters + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_masters_to_config | default([]) }}" + when: (hostvars[item]['is_active'] | default(false)) | bool + - name: Evaluate oo_current_masters + add_host: + name: "{{ item }}" + groups: oo_current_masters + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_masters_to_config | default([]) }}" + when: (hostvars[item]['current_host'] | default(false)) | bool + +- name: Validate pacemaker cluster + hosts: oo_active_masters + tasks: + - name: Retrieve pcs status + command: pcs status + register: pcs_status_output + changed_when: false + - fail: + msg: > + Pacemaker cluster validation failed. One or more nodes are not online.
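+      # NOTE: validate_pcs_cluster is a custom filter shipped in this repo's filter_plugins; + # it parses the registered 'pcs status' output and is true only when every host + # in the given list is reported online.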
+ when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool + +- name: Restart masters + hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + serial: 1 + tasks: + - include: restart_hosts.yml + when: openshift.common.rolling_restart_mode == 'system' + - include: restart_services.yml + when: openshift.common.rolling_restart_mode == 'services' + +- name: Restart active masters + hosts: oo_active_masters + serial: 1 + tasks: + - include: restart_hosts_pacemaker.yml + when: openshift.common.rolling_restart_mode == 'system' + - include: restart_services_pacemaker.yml + when: openshift.common.rolling_restart_mode == 'services' + +- name: Restart current masters + hosts: oo_current_masters + serial: 1 + tasks: + - include: restart_hosts.yml + when: openshift.common.rolling_restart_mode == 'system' + - include: restart_services.yml + when: openshift.common.rolling_restart_mode == 'services' diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml new file mode 100644 index 000000000..ff206f5a2 --- /dev/null +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -0,0 +1,39 @@ +- name: Restart master system + # https://github.com/ansible/ansible/issues/10616 + shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" + async: 1 + poll: 0 + ignore_errors: true + become: yes +# When cluster_method != pacemaker we can ensure the api_port is +# available. +- name: Wait for master API to come back online + become: no + local_action: + module: wait_for + host="{{ inventory_hostname }}" + state=started + delay=10 + port="{{ openshift.master.api_port }}" + when: openshift.master.cluster_method != 'pacemaker' +- name: Wait for master to start + become: no + local_action: + module: wait_for + host="{{ inventory_hostname }}" + state=started + delay=10 + port=22 + when: openshift.master.cluster_method == 'pacemaker' +- name: Wait for master to become available + command: pcs status + register: pcs_status_output + until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool + retries: 15 + delay: 2 + changed_when: false + when: openshift.master.cluster_method == 'pacemaker' +- fail: + msg: > + Pacemaker cluster validation failed: {{ inventory_hostname }} is not online.
+ when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml new file mode 100644 index 000000000..c9219e8de --- /dev/null +++ b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml @@ -0,0 +1,25 @@ +- name: Fail over master resource + command: > + pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }} +- name: Wait for master API to come back online + become: no + local_action: + module: wait_for + host="{{ openshift.master.cluster_hostname }}" + state=started + delay=10 + port="{{ openshift.master.api_port }}" +- name: Restart master system + # https://github.com/ansible/ansible/issues/10616 + shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" + async: 1 + poll: 0 + ignore_errors: true + become: yes +- name: Wait for master to start + become: no + local_action: + module: wait_for + host="{{ inventory_hostname }}" + state=started + delay=10 diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml new file mode 100644 index 000000000..5e539cd65 --- /dev/null +++ b/playbooks/common/openshift-master/restart_services.yml @@ -0,0 +1,27 @@ +- name: Restart master + service: + name: "{{ openshift.common.service_type }}-master" + state: restarted + when: not openshift_master_ha | bool +- name: Restart master API + service: + name: "{{ openshift.common.service_type }}-master-api" + state: restarted + when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker' +- name: Wait for master API to come back online + become: no + local_action: + module: wait_for + host="{{ inventory_hostname }}" + state=started + delay=10 + port="{{ openshift.master.api_port }}" + when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker' +- name: Restart master controllers + service: + name: "{{ openshift.common.service_type }}-master-controllers" + state: restarted + # Ignore errors since it is possible that type != simple for + # pre-3.1.1 installations.
+ ignore_errors: true + when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker' diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml new file mode 100644 index 000000000..e738f3fb6 --- /dev/null +++ b/playbooks/common/openshift-master/restart_services_pacemaker.yml @@ -0,0 +1,10 @@ +- name: Restart master services + command: pcs resource restart master +- name: Wait for master API to come back online + become: no + local_action: + module: wait_for + host="{{ openshift.master.cluster_hostname }}" + state=started + delay=10 + port="{{ openshift.master.api_port }}" diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml index 27e1e66f9..f60c5a2b5 100644 --- a/playbooks/common/openshift-master/service.yml +++ b/playbooks/common/openshift-master/service.yml @@ -2,6 +2,8 @@ - name: Populate g_service_masters host group if needed hosts: localhost gather_facts: no + connection: local + become: no tasks: - fail: msg="new_cluster_state is required to be injected in this playbook" when: new_cluster_state is not defined diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml new file mode 100644 index 000000000..e3f5c17ca --- /dev/null +++ b/playbooks/common/openshift-nfs/config.yml @@ -0,0 +1,5 @@ +--- +- name: Configure nfs hosts + hosts: oo_nfs_to_config + roles: + - role: openshift_storage_nfs diff --git a/playbooks/common/openshift-nfs/filter_plugins b/playbooks/common/openshift-nfs/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-nfs/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-nfs/lookup_plugins b/playbooks/common/openshift-nfs/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-nfs/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-nfs/roles b/playbooks/common/openshift-nfs/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-nfs/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml new file mode 100644 index 000000000..20c8ca248 --- /dev/null +++ b/playbooks/common/openshift-nfs/service.yml @@ -0,0 +1,18 @@ +--- +- name: Populate g_service_nfs host group if needed + hosts: localhost + gather_facts: no + tasks: + - fail: msg="new_cluster_state is required to be injected in this playbook" + when: new_cluster_state is not defined + + - name: Evaluate g_service_nfs + add_host: name={{ item }} groups=g_service_nfs + with_items: oo_host_group_exp | default([]) + +- name: Change state on nfs instance(s) + hosts: g_service_nfs + connection: ssh + gather_facts: no + tasks: + - service: name=nfs-server state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index ead874ca4..3e7bca34e 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -16,6 +16,7 @@ hostname: "{{ openshift_hostname | default(None) }}" public_hostname: "{{ openshift_public_hostname | default(None) }}" deployment_type: "{{ openshift_deployment_type }}" + use_flannel: "{{ openshift_use_flannel | default(None) }}" - role: node local_facts: labels: "{{ openshift_node_labels | default(None) }}" @@ -58,7 +59,7 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -153,21 +154,15 @@ validate_checksum: yes with_items: nodes_needing_certs -- name: Configure node instances +- name: Deploy node certificates hosts: oo_nodes_to_config vars: sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - # TODO: Prefix flannel role variables. 
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - pre_tasks: + tasks: - name: Ensure certificate directory exists file: path: "{{ node_cert_dir }}" state: directory - # TODO: notify restart node # possibly test service started time against certificate/config file # timestamps in node to trigger notify @@ -176,13 +171,50 @@ src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz" dest: "{{ node_cert_dir }}" when: certs_missing + +- name: Evaluate node groups + hosts: localhost + become: no + tasks: + - name: Evaluate oo_containerized_master_nodes + add_host: + name: "{{ item }}" + groups: oo_containerized_master_nodes + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_nodes_to_config | default([]) }}" + when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + +- name: Configure node instances + hosts: oo_containerized_master_nodes + serial: 1 + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" roles: - openshift_node + +- name: Configure node instances + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" + roles: + - openshift_node + +- name: Additional node config + hosts: oo_nodes_to_config + vars: + # TODO: Prefix flannel role variables. + etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + roles: - role: flannel when: openshift.common.use_flannel | bool - role: nuage_node when: openshift.common.use_nuage | bool - role: nickhammond.logrotate + when: not openshift.common.is_containerized | bool - role: fluentd_node when: openshift.common.use_fluentd | bool tasks: @@ -193,7 +225,7 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - file: name={{ mktemp.stdout }} state=absent @@ -215,6 +247,19 @@ | oo_collect('openshift.common.hostname') }}" openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}" pre_tasks: - + # Necessary because, when you're on a node that's also a master, the master will be + # restarted after the node restarts docker, and it can take up to 60 seconds for + # systemd to start the master again + - name: Wait for master API to become available before proceeding + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information.
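+      # The task below polls the API endpoint with a HEAD request, up to 120 times at + # one-second intervals, until the response contains "200 OK".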
+ command: > + curl -k --head --silent {{ openshift.master.api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false + when: openshift.common.is_containerized | bool roles: - openshift_manage_node diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml index 5cf83e186..0f07add2a 100644 --- a/playbooks/common/openshift-node/service.yml +++ b/playbooks/common/openshift-node/service.yml @@ -1,6 +1,8 @@ --- - name: Populate g_service_nodes host group if needed hosts: localhost + connection: local + become: no gather_facts: no tasks: - fail: msg="new_cluster_state is required to be injected in this playbook" diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml new file mode 100644 index 000000000..15690e3bf --- /dev/null +++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml @@ -0,0 +1,17 @@ +--- +g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([]) + | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}" + +g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}" + +g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}" + +g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" + +g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" + +g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}" + +g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}" + +g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute']) | default([]) }}" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 745161bcb..84a3f84d4 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -1,30 +1,15 @@ --- # TODO: fix firewall related bug with GCE and origin, since GCE is overriding # /etc/sysconfig/iptables - -- hosts: localhost - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: - g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - use_sdn: "{{ do_we_use_openshift_sdn }}" - sdn_plugin: "{{ sdn_network_plugin }}" - - include: ../../common/openshift-cluster/config.yml + vars_files: + - ../../gce/openshift-cluster/vars.yml + - ../../gce/openshift-cluster/cluster_hosts.yml vars: - g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" - g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" - g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" + g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo: "{{ deployment_vars[deployment_type].sudo }}" g_nodeonmaster: true openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 2 + openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" - openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}" - os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}" diff --git 
a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml index 5ae3a8fef..75343dffa 100644 --- a/playbooks/gce/openshift-cluster/join_node.yml +++ b/playbooks/gce/openshift-cluster/join_node.yml @@ -1,9 +1,12 @@ --- - name: Populate oo_hosts_to_update group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml + - cluster_hosts.yml tasks: - name: Evaluate oo_hosts_to_update add_host: @@ -16,9 +19,12 @@ - name: Populate oo_masters_to_config host group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml + - cluster_hosts.yml tasks: - name: Evaluate oo_nodes_to_config add_host: @@ -29,11 +35,11 @@ - name: Evaluate oo_first_master add_host: - name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + name: "{{ master_hosts | first }}" ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: oo_first_master - when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + when: master_hosts is defined and master_hosts|length > 0 #- include: config.yml - include: ../../common/openshift-node/config.yml @@ -42,6 +48,4 @@ openshift_debug_level: 4 openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ansible_default_ipv4.address }}" - openshift_use_openshift_sdn: true openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} " - os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index d6ef57c45..562bf8d29 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -2,6 +2,7 @@ - name: Launch instance(s) hosts: localhost connection: local + become: no gather_facts: no vars_files: - vars.yml diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index 53b2b9a5e..e67685912 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -1,11 +1,13 @@ --- - name: Generate oo_list_hosts group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env-{{ cluster_id }} + - set_fact: scratch_group=tag_clusterid-{{ cluster_id }} when: cluster_id != '' - set_fact: scratch_group=all when: cluster_id == '' diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml index 2d0f2ab95..8925de4cb 100644 --- a/playbooks/gce/openshift-cluster/service.yml +++ b/playbooks/gce/openshift-cluster/service.yml @@ -1,28 +1,29 @@ --- - name: Call same systemctl command for openshift on all instance(s) hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml + - cluster_hosts.yml tasks: - fail: msg="cluster_id is required to be injected in this playbook" when: cluster_id is not defined - - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node - add_host: name: "{{ item }}" groups: g_service_nodes ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + with_items: "{{ node_hosts | default([]) | difference(['localhost']) | 
difference(groups.status_terminated) }}" - - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master - add_host: name: "{{ item }}" groups: g_service_masters ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" - include: ../../common/openshift-node/service.yml - include: ../../common/openshift-master/service.yml diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index de8a75b18..488b62eb9 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -16,10 +16,10 @@ #service_account_permissions: "datastore,logging-write" tags: - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }} - - env-{{ cluster }} + - environment-{{ cluster_env }} + - clusterid-{{ cluster_id }} - host-type-{{ type }} - sub-host-type-{{ g_sub_host_type }} - - env-host-type-{{ cluster }}-openshift-{{ type }} when: instances |length > 0 register: gce diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index e20e0a8bc..faa46c0d6 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -2,17 +2,17 @@ - name: Terminate instance(s) hosts: localhost connection: local + become: no gather_facts: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env-{{ cluster_id }} - add_host: name: "{{ item }}" groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) + with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -27,6 +27,7 @@ - name: Terminate instances(s) hosts: localhost + become: no connection: local gather_facts: no vars_files: diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 8096aa654..dadceae58 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -1,9 +1,12 @@ --- - name: Populate oo_hosts_to_update group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml + - cluster_hosts.yml tasks: - name: Evaluate oo_hosts_to_update add_host: @@ -11,9 +14,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])) - | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])) - | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([])) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/gce/openshift-cluster/vars.yml 
b/playbooks/gce/openshift-cluster/vars.yml index a8ce8eb22..f004a9e6b 100644 --- a/playbooks/gce/openshift-cluster/vars.yml +++ b/playbooks/gce/openshift-cluster/vars.yml @@ -1,7 +1,12 @@ --- -do_we_use_openshift_sdn: true -sdn_network_plugin: redhat/openshift-ovs-subnet -# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation +debug_level: 2 + +deployment_rhel7_ent_base: + image: rhel-7 + machine_type: n1-standard-1 + ssh_user: + sudo: yes + deployment_vars: origin: image: preinstalled-slave-50g-v5 @@ -13,8 +18,6 @@ deployment_vars: machine_type: n1-standard-1 ssh_user: root sudo: no - enterprise: - image: rhel-7 - machine_type: n1-standard-1 - ssh_user: - sudo: yes + enterprise: "{{ deployment_rhel7_ent_base }}" + openshift-enterprise: "{{ deployment_rhel7_ent_base }}" + atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/gce/openshift-cluster/wip.yml b/playbooks/gce/openshift-cluster/wip.yml index 51a521a6b..0e3757546 100644 --- a/playbooks/gce/openshift-cluster/wip.yml +++ b/playbooks/gce/openshift-cluster/wip.yml @@ -1,6 +1,7 @@ --- - name: WIP hosts: localhost + become: no connection: local gather_facts: no vars_files: @@ -12,7 +13,7 @@ groups: oo_masters_for_deploy ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) + with_items: "{{ g_master_hosts | default([]) }}" - name: Deploy OpenShift Services hosts: oo_masters_for_deploy diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml new file mode 100644 index 000000000..15690e3bf --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml @@ -0,0 +1,17 @@ +--- +g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([]) + | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}" + +g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}" + +g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}" + +g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" + +g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" + +g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}" + +g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}" + +g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute']) | default([]) }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index 4d1ae22ff..be9cbbfaa 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -2,24 +2,14 @@ # TODO: need to figure out a plan for setting hostname, currently the default # is localhost, so no hostname value (or public_hostname) value is getting # assigned - -- hosts: localhost - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: - g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - - include: ../../common/openshift-cluster/config.yml + vars_files: + - ../../libvirt/openshift-cluster/vars.yml + - 
../../libvirt/openshift-cluster/cluster_hosts.yml vars: - g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" - g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" - g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" + g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo: "{{ deployment_vars[deployment_type].sudo }}" + g_nodeonmaster: true openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 2 + openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 8d7949dd1..3a48c82bc 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -1,6 +1,8 @@ --- - name: Launch instance(s) hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml @@ -11,6 +13,7 @@ image_url: "{{ deployment_vars[deployment_type].image.url }}" image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}" image_name: "{{ deployment_vars[deployment_type].image.name }}" + image_compression: "{{ deployment_vars[deployment_type].image.compression }}" tasks: - fail: msg="Deployment type not supported for libvirt provider yet" when: deployment_type == 'online' diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index 5954bb01e..6cb81ee79 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -1,11 +1,13 @@ --- - name: Generate oo_list_hosts group hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env-{{ cluster_id }} + - set_fact: scratch_group=tag_clusterid-{{ cluster_id }} when: cluster_id != '' - set_fact: scratch_group=all when: cluster_id == '' @@ -21,6 +23,8 @@ - name: List Hosts hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml index ae095f5a2..cd07c8701 100644 --- a/playbooks/libvirt/openshift-cluster/service.yml +++ b/playbooks/libvirt/openshift-cluster/service.yml @@ -5,6 +5,8 @@ - name: Call same systemctl command for openshift on all instance(s) hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml @@ -18,7 +20,7 @@ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: g_service_masters - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) + with_items: "{{ g_master_hosts | default([]) }}" - name: Evaluate g_service_nodes add_host: @@ -26,7 +28,7 @@ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: g_service_nodes - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]) + with_items: "{{ g_node_hosts | default([]) }}" - include: ../../common/openshift-node/service.yml - include: ../../common/openshift-master/service.yml diff --git 
a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml index 8a67d713f..397158b9e 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml @@ -4,13 +4,17 @@ dest: "{{ libvirt_storage_pool_path }}" state: directory +# We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set. - acl: - default: yes + default: "{{ item }}" entity: kvm etype: group name: "{{ libvirt_storage_pool_path }}" permissions: rwx state: present + with_items: + - no + - yes - name: Test if libvirt storage pool for openshift already exists command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}" diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 4825207c9..ff1cedc94 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -13,8 +13,15 @@ get_url: url: '{{ image_url }}' sha256sum: '{{ image_sha256 }}' - dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}' + register: downloaded_image + +- name: Uncompress Base Cloud image + command: 'unxz -kf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' + args: + creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + when: image_compression in ["xz"] and downloaded_image.changed - name: Create the cloud-init config drive path file: @@ -81,7 +88,7 @@ ansible_ssh_host: '{{ item.1 }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}' + groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}" with_together: - instances - ips diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml index 870bcf2a6..0ca8e0974 100644 --- a/playbooks/libvirt/openshift-cluster/templates/domain.xml +++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml @@ -3,8 +3,8 @@ <memory unit='GiB'>1</memory> <metadata xmlns:ansible="https://github.com/ansible/ansible"> <ansible:tags> - <ansible:tag>env-{{ cluster }}</ansible:tag> - <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag> + <ansible:tag>environment-{{ cluster_env }}</ansible:tag> + <ansible:tag>clusterid-{{ cluster }}</ansible:tag> <ansible:tag>host-type-{{ type }}</ansible:tag> <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag> </ansible:tags> diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index 8f00812a9..8d845c8f2 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml @@ -3,11 +3,13 @@ - name: Terminate instance(s) hosts: localhost + become: 
no + connection: local gather_facts: no vars_files: - vars.yml tasks: - - set_fact: cluster_group=tag_env-{{ cluster_id }} + - set_fact: cluster_group=tag_clusterid-{{ cluster_id }} - add_host: name: "{{ item }}" groups: oo_hosts_to_terminate @@ -28,6 +30,8 @@ - name: Terminate instance(s) hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml index d09832c16..2dc540978 100644 --- a/playbooks/libvirt/openshift-cluster/update.yml +++ b/playbooks/libvirt/openshift-cluster/update.yml @@ -1,9 +1,12 @@ --- - name: Populate oo_hosts_to_update group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml + - cluster_hosts.yml tasks: - name: Evaluate oo_hosts_to_update add_host: @@ -11,9 +14,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])) - | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])) - | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([])) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index c77a0797e..da628786b 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -3,16 +3,32 @@ libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-open libvirt_storage_pool: 'openshift-ansible' libvirt_network: openshift-ansible libvirt_uri: 'qemu:///system' +debug_level: 2 + +# Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication. +# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work. 
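+# To use these deployment types, override image_url, image_name and image_sha256 via the +# oo_option lookup so they point at a copy of the image that is reachable without authentication.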
+deployment_rhel7_ent_base: + image: + url: "{{ lookup('oo_option', 'image_url') | + default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}" + name: "{{ lookup('oo_option', 'image_name') | + default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}" + sha256: "{{ lookup('oo_option', 'image_sha256') | + default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}" + ssh_user: openshift + sudo: yes deployment_vars: origin: image: url: "{{ lookup('oo_option', 'image_url') | - default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2', True) }}" + default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz', True) }}" + compression: "{{ lookup('oo_option', 'image_compression') | + default('xz', True) }}" name: "{{ lookup('oo_option', 'image_name') | default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}" sha256: "{{ lookup('oo_option', 'image_sha256') | - default('e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab', True) }}" + default('9461006300d65172f5668d8875f2aad7b54f7ba4e9c5435d65a84a5a2d66e39b', True) }}" ssh_user: openshift sudo: yes online: @@ -22,18 +38,6 @@ deployment_vars: sha256: ssh_user: root sudo: no - enterprise: - image: - url: "{{ lookup('oo_option', 'image_url') | - default('https://access.cdn.redhat.com//content/origin/files/sha256/ff/ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3/rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}" - name: "{{ lookup('oo_option', 'image_name') | - default('rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}" - sha256: "{{ lookup('oo_option', 'image_sha256') | - default('ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3', True) }}" - ssh_user: openshift - sudo: yes -# origin: -# fedora: -# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2" -# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2 -# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86 + enterprise: "{{ deployment_rhel7_ent_base }}" + openshift-enterprise: "{{ deployment_rhel7_ent_base }}" + atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml new file mode 100644 index 000000000..1023f3ec1 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml @@ -0,0 +1,17 @@ +--- +g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([]) + | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}" + +g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}" + +g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}" + +g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}" + +g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}" + +g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}" + +g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra']) | default([]) }}" + +g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute']) | default([]) }}" diff --git a/playbooks/openstack/openshift-cluster/config.yml 
b/playbooks/openstack/openshift-cluster/config.yml index 888804e28..b338d2eb4 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -1,21 +1,13 @@ -- hosts: localhost - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: - g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}" - +--- - include: ../../common/openshift-cluster/config.yml + vars_files: + - ../../openstack/openshift-cluster/vars.yml + - ../../openstack/openshift-cluster/cluster_hosts.yml vars: - g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" - g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" - g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" + g_nodeonmaster: true + g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + g_sudo: "{{ deployment_vars[deployment_type].sudo }}" openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: 2 + openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ansible_default_ipv4.address }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index bfd73c777..4f6a59a30 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -4,6 +4,11 @@ description: OpenShift cluster parameters: + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + cluster_id: type: string label: Cluster ID @@ -345,13 +350,14 @@ resources: params: cluster_id: { get_param: cluster_id } k8s_type: etcd - cluster_id: { get_param: cluster_id } - type: etcd - image: { get_param: etcd_image } - flavor: { get_param: etcd_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: etcd + image: { get_param: etcd_image } + flavor: { get_param: etcd_flavor } + key_name: { get_resource: keypair } + net: { get_resource: net } + subnet: { get_resource: subnet } secgrp: - { get_resource: etcd-secgrp } floating_network: { get_param: floating_ip_pool } @@ -375,13 +381,14 @@ resources: params: cluster_id: { get_param: cluster_id } k8s_type: master - cluster_id: { get_param: cluster_id } - type: master - image: { get_param: master_image } - flavor: { get_param: master_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: master + image: { get_param: master_image } + flavor: { get_param: master_flavor } + key_name: { get_resource: keypair } + net: { get_resource: net } + subnet: { get_resource: subnet } secgrp: - { get_resource: master-secgrp } floating_network: { get_param: floating_ip_pool } @@ -406,14 +413,15 @@ resources: cluster_id: { get_param: cluster_id } k8s_type: node sub_host_type: compute - cluster_id: { get_param: cluster_id } - type: node - subtype: compute - image: { get_param: node_image } - flavor: { get_param: node_flavor } - key_name: { 
get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: node + subtype: compute + image: { get_param: node_image } + flavor: { get_param: node_flavor } + key_name: { get_resource: keypair } + net: { get_resource: net } + subnet: { get_resource: subnet } secgrp: - { get_resource: node-secgrp } floating_network: { get_param: floating_ip_pool } @@ -438,14 +446,15 @@ resources: cluster_id: { get_param: cluster_id } k8s_type: node sub_host_type: infra - cluster_id: { get_param: cluster_id } - type: node - subtype: infra - image: { get_param: infra_image } - flavor: { get_param: infra_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: node + subtype: infra + image: { get_param: infra_image } + flavor: { get_param: infra_flavor } + key_name: { get_resource: keypair } + net: { get_resource: net } + subnet: { get_resource: subnet } secgrp: - { get_resource: node-secgrp } - { get_resource: infra-secgrp } diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml index 9dcab3e60..f83f2c984 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml @@ -9,6 +9,11 @@ parameters: label: Name description: Name + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + cluster_id: type: string label: Cluster ID @@ -105,14 +110,9 @@ resources: user_data: { get_file: user-data } user_data_format: RAW metadata: - env: { get_param: cluster_id } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } host-type: { get_param: type } - env-host-type: - str_replace: - template: cluster_id-openshift-type - params: - cluster_id: { get_param: cluster_id } - type: { get_param: type } sub-host-type: { get_param: subtype } port: diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index b18512495..fdcb77acc 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -1,6 +1,7 @@ --- - name: Launch instance(s) hosts: localhost + become: no connection: local gather_facts: no vars_files: @@ -28,6 +29,7 @@ - name: Create or Update OpenStack Stack command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} + -P cluster_env={{ cluster_env }} -P cluster_id={{ cluster_id }} -P cidr={{ openstack_network_cidr }} -P dns_nameservers={{ openstack_network_dns | join(",") }} @@ -70,7 +72,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_etcd, tag_env-host-type_{{ cluster_id }}-openshift-etcd, tag_sub-host-type_default' + groups: 'tag_environment_{{ cluster_env }}, tag_host-type_etcd, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}' with_together: - parsed_outputs.etcd_names - parsed_outputs.etcd_ips @@ -82,7 +84,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, 
tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master, tag_sub-host-type_default' + groups: 'tag_environment_{{ cluster_env }}, tag_host-type_master, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}' with_together: - parsed_outputs.master_names - parsed_outputs.master_ips @@ -94,7 +96,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_compute' + groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_compute, tag_clusterid_{{ cluster_id }}' with_together: - parsed_outputs.node_names - parsed_outputs.node_ips @@ -106,7 +108,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_infra' + groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_infra, tag_clusterid_{{ cluster_id }}' with_together: - parsed_outputs.infra_names - parsed_outputs.infra_ips diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml index fa194b072..123ebd323 100644 --- a/playbooks/openstack/openshift-cluster/list.yml +++ b/playbooks/openstack/openshift-cluster/list.yml @@ -1,11 +1,13 @@ --- - name: Generate oo_list_hosts group hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env_{{ cluster_id }} + - set_fact: scratch_group=tag_clusterid_{{ cluster_id }} when: cluster_id != '' - set_fact: scratch_group=all when: cluster_id == '' @@ -22,6 +24,8 @@ - name: List Hosts hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml index 62df2be73..d4ab51fa7 100644 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ b/playbooks/openstack/openshift-cluster/terminate.yml @@ -1,17 +1,17 @@ - name: Terminate instance(s) hosts: localhost + become: no connection: local gather_facts: no vars_files: - vars.yml tasks: - - set_fact: cluster_group=tag_env_{{ cluster_id }} - add_host: name: "{{ item }}" groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[cluster_group] | default([]) + with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([])) - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -25,6 +25,7 @@ default('no', True) | lower in ['no', 'false'] - hosts: localhost + become: no connection: local gather_facts: no vars_files: diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml index e006aa74a..2dc540978 100644 --- a/playbooks/openstack/openshift-cluster/update.yml +++ b/playbooks/openstack/openshift-cluster/update.yml @@ -1,9 +1,12 @@ --- - name: Populate oo_hosts_to_update group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml + - cluster_hosts.yml tasks: - name: Evaluate oo_hosts_to_update 
add_host: @@ -11,9 +14,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])) - | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])) - | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([])) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml index e3796c91f..76cde1706 100644 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ b/playbooks/openstack/openshift-cluster/vars.yml @@ -1,4 +1,5 @@ --- +debug_level: 2 openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) | default('files/heat_stack.yaml', True) }}" openstack_network_cidr: "{{ lookup('oo_option', 'net_cidr' ) | @@ -19,6 +20,11 @@ openstack_flavor: infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}" node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}" +deployment_rhel7_ent_base: + image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}" + ssh_user: openshift + sudo: yes + deployment_vars: origin: image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}" @@ -28,7 +34,6 @@ deployment_vars: image: ssh_user: root sudo: no - enterprise: - image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}" - ssh_user: openshift - sudo: yes + enterprise: "{{ deployment_rhel7_ent_base }}" + openshift-enterprise: "{{ deployment_rhel7_ent_base }}" + atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml index f79273824..ea14fb39a 100644 --- a/roles/ansible/tasks/main.yml +++ b/roles/ansible/tasks/main.yml @@ -2,16 +2,8 @@ # Install ansible client - name: Install Ansible - yum: - pkg: ansible - state: installed - when: ansible_pkg_mgr == "yum" - -- name: Install Ansible - dnf: - pkg: ansible - state: installed - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=ansible state=present" + when: not openshift.common.is_containerized | bool - include: config.yml vars: diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml index b7757214d..36fc9b282 100644 --- a/roles/ansible_tower/tasks/main.yaml +++ b/roles/ansible_tower/tasks/main.yaml @@ -1,6 +1,6 @@ --- - name: install some useful packages - yum: name={{ item }} + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" with_items: - git - python-pip diff --git a/roles/ansible_tower_cli/tasks/main.yml b/roles/ansible_tower_cli/tasks/main.yml index 41fac22a0..0c5163b50 100644 --- a/roles/ansible_tower_cli/tasks/main.yml +++ b/roles/ansible_tower_cli/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Install python-ansible-tower-cli - yum: name=python-ansible-tower-cli + action: "{{ ansible_pkg_mgr }} name=python-ansible-tower-cli state=present" - template: src: tower_cli.cfg.j2 diff --git a/roles/chrony/README.md b/roles/chrony/README.md new file mode 100644 index 000000000..bf15d9669 --- /dev/null +++ b/roles/chrony/README.md @@ -0,0 +1,31 @@ +Role Name +========= + +A role to configure chrony as the ntp client + +Requirements +------------ + + +Role Variables 
+-------------- + +chrony_ntp_servers: a list of ntp servers to use in the chrony.conf file + +Dependencies +------------ + +roles/lib_timedatectl + +Example Playbook +---------------- + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift Operations diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml new file mode 100644 index 000000000..95576e666 --- /dev/null +++ b/roles/chrony/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for chrony diff --git a/roles/chrony/handlers/main.yml b/roles/chrony/handlers/main.yml new file mode 100644 index 000000000..1973c79e2 --- /dev/null +++ b/roles/chrony/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart chronyd + service: + name: chronyd + state: restarted diff --git a/roles/chrony/meta/main.yml b/roles/chrony/meta/main.yml new file mode 100644 index 000000000..85595d7c3 --- /dev/null +++ b/roles/chrony/meta/main.yml @@ -0,0 +1,18 @@ +--- +galaxy_info: + author: Openshift Operations + description: Configure chrony as an ntp client + company: Red Hat + license: Apache 2.0 + min_ansible_version: 1.9.2 + platforms: + - name: EL + versions: + - 7 + - name: Fedora + versions: + - all + categories: + - system +dependencies: +- roles/lib_timedatectl diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml new file mode 100644 index 000000000..fae6d8e4c --- /dev/null +++ b/roles/chrony/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: remove ntp package + yum: + name: ntp + state: absent + +- name: ensure chrony package is installed + yum: + name: chrony + state: installed + +- name: Install /etc/chrony.conf + template: + src: chrony.conf.j2 + dest: /etc/chrony.conf + owner: root + group: root + mode: 0644 + notify: + - Restart chronyd + +- name: enable ntp via timedatectl set-ntp yes + timedatectl: + ntp: True + +- name: start and enable chronyd + service: + name: chronyd + state: started + enabled: yes diff --git a/roles/chrony/templates/chrony.conf.j2 b/roles/chrony/templates/chrony.conf.j2 new file mode 100644 index 000000000..de43b6364 --- /dev/null +++ b/roles/chrony/templates/chrony.conf.j2 @@ -0,0 +1,45 @@ +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% for server in chrony_ntp_servers %} +server {{ server }} iburst +{% endfor %} + +# Ignore stratum in source selection. +stratumweight 0 + +# Record the rate at which the system clock gains/loses time. +driftfile /var/lib/chrony/drift + +# Enable kernel RTC synchronization. +rtcsync + +# In first three updates step the system clock instead of slew +# if the adjustment is larger than 10 seconds. +makestep 10 3 + +# Allow NTP client access from local network. +#allow 192.168/16 + +# Listen for commands only on localhost. +bindcmdaddress 127.0.0.1 +bindcmdaddress ::1 + +# Serve time even if not synchronized to any NTP server. +#local stratum 10 + +keyfile /etc/chrony.keys + +# Specify the key used as password for chronyc. +commandkey 1 + +# Generate command key if missing. +generatecommandkey + +# Disable logging of client accesses. +noclientlog + +# Send a message to syslog if a clock adjustment is larger than 0.5 seconds. 
+logchange 0.5 + +logdir /var/log/chrony +#log measurements statistics tracking diff --git a/roles/chrony/vars/main.yml b/roles/chrony/vars/main.yml new file mode 100644 index 000000000..061a21547 --- /dev/null +++ b/roles/chrony/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for chrony diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index 8410e7c90..6e9f3a8bd 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -1,25 +1,12 @@ --- - name: Install cockpit-ws - yum: - name: "{{ item }}" - state: present + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" with_items: - cockpit-ws - cockpit-shell - cockpit-bridge - "{{ cockpit_plugins }}" - when: ansible_pkg_mgr == "yum" - -- name: Install cockpit-ws - dnf: - name: "{{ item }}" - state: present - with_items: - - cockpit-ws - - cockpit-shell - - cockpit-bridge - - "{{ cockpit_plugins }}" - when: ansible_pkg_mgr == "dnf" + when: not openshift.common.is_containerized | bool - name: Enable cockpit-ws service: diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml index f8496199d..b732fb7a4 100644 --- a/roles/copr_cli/tasks/main.yml +++ b/roles/copr_cli/tasks/main.yml @@ -1,10 +1,3 @@ --- -- yum: - name: copr-cli - state: present - when: ansible_pkg_mgr == "yum" - -- dnf: - name: copr-cli - state: present - when: ansible_pkg_mgr == "dnf" +- action: "{{ ansible_pkg_mgr }} name=copr-cli state=present" + when: not openshift.common.is_containerized | bool diff --git a/roles/docker/README.md b/roles/docker/README.md index 46f259eb7..6b5ee4421 100644 --- a/roles/docker/README.md +++ b/roles/docker/README.md @@ -1,4 +1,4 @@ -Role Name +Docker ========= Ensures docker package is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. 
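The dominant refactor in this commit, visible in the roles above and repeated in the roles below, collapses each duplicated `yum`/`dnf` task pair into a single task that dispatches on the `ansible_pkg_mgr` fact, guarded so RPM installs are skipped on Atomic/containerized hosts. A minimal standalone sketch of that pattern follows; the play target and the `tmux` package are illustrative only, and `openshift.common.is_containerized` is assumed to have been set beforehand by the `openshift_facts` module used throughout this repo:

```yaml
---
# Sketch of the package-manager abstraction used throughout this commit.
# ansible_pkg_mgr resolves to the module name ("yum" or "dnf") detected for
# the target host, so one task replaces the old yum/dnf pair, and the `when`
# guard skips the install entirely on containerized/Atomic hosts.
- hosts: all
  tasks:
  - name: Install a package via the detected package manager
    action: "{{ ansible_pkg_mgr }} name=tmux state=present"
    when: not openshift.common.is_containerized | bool
```

Besides halving the task count, the single `action:` form keeps the yum and dnf variants from drifting apart, which is exactly the bug class the old copy-pasted pairs invited.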
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index 7d60f1891..9f827417f 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -1,7 +1,10 @@ --- - name: restart docker - service: name=docker state=restarted + service: + name: docker + state: restarted + when: not (docker_service_status_changed | default(false) | bool) - name: restart udev service: diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 857674454..a56f1f391 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,15 +1,18 @@ --- # tasks file for docker - name: Install docker - yum: pkg=docker - when: ansible_pkg_mgr == "yum" - -- name: Install docker - dnf: pkg=docker - when: ansible_pkg_mgr == "dnf" - + action: "{{ ansible_pkg_mgr }} name=docker state=present" + when: not openshift.common.is_atomic | bool + - name: enable and start the docker service - service: name=docker enabled=yes state=started + service: + name: docker + enabled: yes + state: started + register: start_result + +- set_fact: + docker_service_status_changed: "{{ start_result | changed }}" - include: udev_workaround.yml when: docker_udev_workaround | default(False) diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 0fd3de585..9e7fa59cf 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -1,4 +1,5 @@ --- +etcd_service: "{{ 'etcd' if not openshift.common.is_containerized else 'etcd_container' }}" etcd_interface: "{{ ansible_default_ipv4.interface }}" etcd_client_port: 2379 etcd_peer_port: 2380 diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index 4c0efb97b..e00e1cac4 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -1,4 +1,5 @@ --- + - name: restart etcd - service: name=etcd state=restarted - when: not etcd_service_status_changed | default(false) + service: name={{ etcd_service }} state=restarted + when: not (etcd_service_status_changed | default(false) | bool) diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index efaab5f31..1e97b047b 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -8,27 +8,52 @@ when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4" - name: Install etcd - yum: pkg=etcd-2.* state=present - when: ansible_pkg_mgr == "yum" + action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present" + when: not openshift.common.is_containerized | bool -- name: Install etcd - dnf: pkg=etcd* state=present - when: ansible_pkg_mgr == "dnf" +- name: Pull etcd container + command: docker pull {{ openshift.etcd.etcd_image }} + when: openshift.common.is_containerized | bool + +- name: Install etcd container service file + template: + dest: "/etc/systemd/system/etcd_container.service" + src: etcd.docker.service + register: install_etcd_result + when: openshift.common.is_containerized | bool + +- name: Ensure etcd datadir exists + when: openshift.common.is_containerized | bool + file: + path: "{{ etcd_data_dir }}" + state: directory + mode: 0700 + +- name: Disable system etcd when containerized + when: openshift.common.is_containerized | bool + service: + name: etcd + state: stopped + enabled: no + +- name: Reload systemd units + command: systemctl daemon-reload + when: openshift.common.is_containerized and ( install_etcd_result | changed ) - name: Validate permissions on the config dir file: path: "{{ etcd_conf_dir 
}}" state: directory - owner: etcd - group: etcd + owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}" + group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}" mode: 0700 - name: Validate permissions on certificate files file: path: "{{ item }}" mode: 0600 - group: etcd - owner: etcd + owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}" + group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}" when: etcd_url_scheme == 'https' with_items: - "{{ etcd_ca_file }}" @@ -39,8 +64,8 @@ file: path: "{{ item }}" mode: 0600 - group: etcd - owner: etcd + owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}" + group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}" when: etcd_peer_url_scheme == 'https' with_items: - "{{ etcd_peer_ca_file }}" @@ -57,10 +82,10 @@ - name: Enable etcd service: - name: etcd + name: "{{ etcd_service }}" state: started enabled: yes register: start_result - set_fact: - etcd_service_status_changed = start_result | changed + etcd_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 32577c96c..28816fd87 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -15,13 +15,13 @@ ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} ETCD_NAME=default {% endif %} ETCD_DATA_DIR={{ etcd_data_dir }} -#ETCD_SNAPSHOT_COUNTER="10000" -ETCD_HEARTBEAT_INTERVAL="500" -ETCD_ELECTION_TIMEOUT="2500" +#ETCD_SNAPSHOT_COUNTER=10000 +ETCD_HEARTBEAT_INTERVAL=500 +ETCD_ELECTION_TIMEOUT=2500 ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} -#ETCD_MAX_SNAPSHOTS="5" -#ETCD_MAX_WALS="5" -#ETCD_CORS="" +#ETCD_MAX_SNAPSHOTS=5 +#ETCD_MAX_WALS=5 +#ETCD_CORS= {% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %} #[cluster] @@ -29,15 +29,15 @@ ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }} ETCD_INITIAL_CLUSTER={{ initial_cluster() }} ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} -#ETCD_DISCOVERY="" -#ETCD_DISCOVERY_SRV="" -#ETCD_DISCOVERY_FALLBACK="proxy" -#ETCD_DISCOVERY_PROXY="" +#ETCD_DISCOVERY= +#ETCD_DISCOVERY_SRV= +#ETCD_DISCOVERY_FALLBACK=proxy +#ETCD_DISCOVERY_PROXY= {% endif %} ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} #[proxy] -#ETCD_PROXY="off" +#ETCD_PROXY=off #[security] {% if etcd_url_scheme == 'https' -%} diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service new file mode 100644 index 000000000..8058fa188 --- /dev/null +++ b/roles/etcd/templates/etcd.docker.service @@ -0,0 +1,13 @@ +[Unit] +Description=The Etcd Server container +After=docker.service + +[Service] +EnvironmentFile=/etc/etcd/etcd.conf +ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }} +ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:z --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} +ExecStop=/usr/bin/docker stop {{ etcd_service }} +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index 86e1bc96e..aa27b674e 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -1,13 +1,8 @@ --- - name: Install flannel sudo: true - yum: pkg=flannel state=present - when: 
ansible_pkg_mgr == "yum" - -- name: Install flannel - sudo: true - dnf: pkg=flannel state=present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=flannel state=present" + when: not openshift.common.is_containerized | bool - name: Set flannel etcd url sudo: true diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml index 43c499b4d..1c87d562a 100644 --- a/roles/fluentd_master/tasks/main.yml +++ b/roles/fluentd_master/tasks/main.yml @@ -1,16 +1,12 @@ --- -# TODO: Update fluentd install and configuration when packaging is complete -- name: download and install td-agent - yum: - name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' - state: present - when: ansible_pkg_mgr == "yum" +- fail: + msg: "fluentd master is not yet supported on atomic hosts" + when: openshift.common.is_containerized | bool +# TODO: Update fluentd install and configuration when packaging is complete - name: download and install td-agent - dnf: - name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present" + when: not openshift.common.is_containerized | bool - name: Verify fluentd plugin installed command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes' diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml index 827a1c075..8d34c0b19 100644 --- a/roles/fluentd_node/tasks/main.yml +++ b/roles/fluentd_node/tasks/main.yml @@ -1,16 +1,12 @@ --- -# TODO: Update fluentd install and configuration when packaging is complete -- name: download and install td-agent - yum: - name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' - state: present - when: ansible_pkg_mgr == "yum" +- fail: + msg: "fluentd node is not yet supported on atomic hosts" + when: openshift.common.is_containerized | bool +# TODO: Update fluentd install and configuration when packaging is complete - name: download and install td-agent - dnf: - name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present" + when: not openshift.common.is_containerized | bool - name: Verify fluentd plugin installed command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes' diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml index 7ba5bd485..937d94209 100644 --- a/roles/haproxy/defaults/main.yml +++ b/roles/haproxy/defaults/main.yml @@ -1,4 +1,6 @@ --- +haproxy_frontend_port: 80 + haproxy_frontends: - name: main binds: @@ -18,4 +20,4 @@ os_firewall_allow: - service: haproxy stats port: "9000/tcp" - service: haproxy balance - port: "8443/tcp" + port: "{{ haproxy_frontend_port }}/tcp" diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml index ee60adcab..5b8691b26 100644 --- a/roles/haproxy/handlers/main.yml +++ b/roles/haproxy/handlers/main.yml @@ -3,3 +3,4 @@ service: name: haproxy state: restarted + when: not (haproxy_start_result_changed | default(false) | bool) diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml index 5d015fadd..0b8370ce2 100644 --- a/roles/haproxy/tasks/main.yml +++ b/roles/haproxy/tasks/main.yml @@ 
-1,15 +1,7 @@ --- - name: Install haproxy - yum: - pkg: haproxy - state: present - when: ansible_pkg_mgr == "yum" - -- name: Install haproxy - dnf: - pkg: haproxy - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=haproxy state=present" + when: not openshift.common.is_containerized | bool - name: Configure haproxy template: @@ -27,6 +19,5 @@ enabled: yes register: start_result -- name: Pause 30 seconds if haproxy was just started - pause: seconds=30 - when: start_result | changed +- set_fact: + haproxy_start_result_changed: "{{ start_result | changed }}" diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml index 3fcb9fd18..5432a5e2f 100644 --- a/roles/kube_nfs_volumes/tasks/main.yml +++ b/roles/kube_nfs_volumes/tasks/main.yml @@ -1,11 +1,11 @@ --- -- name: Install pyparted (RedHat/Fedora) - yum: name=pyparted,python-httplib2 state=present - when: ansible_pkg_mgr == "yum" +- fail: + msg: "This role is not yet supported on atomic hosts" + when: openshift.common.is_atomic | bool - name: Install pyparted (RedHat/Fedora) - dnf: name=pyparted,python-httplib2 state=present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=pyparted,python-httplib2 state=present" + when: not openshift.common.is_containerized | bool - name: partition the drives partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }} diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml index a58a7b824..9a68ceb8d 100644 --- a/roles/kube_nfs_volumes/tasks/nfs.yml +++ b/roles/kube_nfs_volumes/tasks/nfs.yml @@ -1,11 +1,7 @@ --- -- name: Install NFS server on Fedora/Red Hat - yum: name=nfs-utils state=present - when: ansible_pkg_mgr == "yum" - -- name: Install NFS server on Fedora/Red Hat - dnf: name=nfs-utils state=present - when: ansible_pkg_mgr == "dnf" +- name: Install NFS server + action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present" + when: not openshift.common.is_containerized | bool - name: Start rpcbind on Fedora/Red Hat service: name=rpcbind state=started enabled=yes diff --git a/roles/lib_timedatectl/library/timedatectl.py b/roles/lib_timedatectl/library/timedatectl.py new file mode 100644 index 000000000..b6eab5918 --- /dev/null +++ b/roles/lib_timedatectl/library/timedatectl.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +''' + timedatectl ansible module + + This module supports setting ntp enabled +''' +import subprocess + + + + +def do_timedatectl(options=None): + ''' subprocess timedatectl ''' + + cmd = ['/usr/bin/timedatectl'] + if options: + cmd += options.split() + + proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE) + proc.wait() + return proc.stdout.read() + +def main(): + ''' Ansible module for timedatectl + ''' + + module = AnsibleModule( + argument_spec=dict( + #state=dict(default='enabled', type='str'), + ntp=dict(default=True, type='bool'), + ), + #supports_check_mode=True + ) + + # do something + ntp_enabled = False + + results = do_timedatectl() + + for line in results.split('\n'): + if 'NTP enabled' in line: + if 'yes' in line: + ntp_enabled = True + + ######## + # Enable NTP + ######## + if module.params['ntp']: + if ntp_enabled: + module.exit_json(changed=False, results="enabled", state="enabled") + + # Enable it + # Commands to enable ntp + else: + results = do_timedatectl('set-ntp yes') + module.exit_json(changed=True, results="enabled", state="enabled", cmdout=results) + + ######### + # Disable NTP + ######### + else: + if not ntp_enabled: + 
module.exit_json(changed=False, results="disabled", state="disabled") + + results = do_timedatectl('set-ntp no') + module.exit_json(changed=True, results="disabled", state="disabled") + + module.exit_json(failed=True, changed=False, results="Something went wrong", state="unknown") + +# Pylint is getting in the way of basic Ansible +# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py index 8bb586c0b..2f9524556 100644 --- a/roles/lib_zabbix/library/zbx_action.py +++ b/roles/lib_zabbix/library/zbx_action.py @@ -30,6 +30,17 @@ # pylint: disable=import-error from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection, ZabbixAPIError +CUSTOM_SCRIPT_ACTION = '0' +IPMI_ACTION = '1' +SSH_ACTION = '2' +TELNET_ACTION = '3' +GLOBAL_SCRIPT_ACTION = '4' + +EXECUTE_ON_ZABBIX_AGENT = '0' +EXECUTE_ON_ZABBIX_SERVER = '1' + +OPERATION_REMOTE_COMMAND = '1' + def exists(content, key='result'): ''' Check if key exists in content or the size of content[key] > 0 ''' @@ -70,14 +81,91 @@ def filter_differences(zabbix_filters, user_filters): return rval -# This logic is quite complex. We are comparing two lists of dictionaries. -# The outer for-loops allow us to descend down into both lists at the same time -# and then walk over the key,val pairs of the incoming user dict's changes -# or updates. The if-statements are looking at different sub-object types and -# comparing them. The other suggestion on how to write this is to write a recursive -# compare function but for the time constraints and for complexity I decided to go -# this route. -# pylint: disable=too-many-branches +def opconditions_diff(zab_val, user_val): + ''' Report whether there are differences between opconditions on + zabbix and opconditions supplied by user ''' + + if len(zab_val) != len(user_val): + return True + + for z_cond, u_cond in zip(zab_val, user_val): + if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \ + ['conditiontype', 'operator', 'value']]): + return True + + return False + +def opmessage_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage on + zabbix and opmessage supplied by user ''' + + for op_msg_key, op_msg_val in user_val.items(): + if zab_val[op_msg_key] != str(op_msg_val): + return True + + return False + +def opmessage_grp_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage_grp + on zabbix and opmessage_grp supplied by user ''' + + zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab_val]) + usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in user_val]) + if usr_grp_ids != zab_grp_ids: + return True + + return False + +def opmessage_usr_diff(zab_val, user_val): + ''' Report whether there are differences between opmessage_usr + on zabbix and opmessage_usr supplied by user ''' + + zab_usr_ids = set([usr['userid'] for usr in zab_val]) + usr_ids = set([usr['userid'] for usr in user_val]) + if usr_ids != zab_usr_ids: + return True + + return False + +def opcommand_diff(zab_op_cmd, usr_op_cmd): + ''' Check whether user-provided opcommand matches what's already + stored in Zabbix ''' + + for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items(): + if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val): + return True + return False + +def host_in_zabbix(zab_hosts, usr_host): + ''' Check whether a particular user host is already in the + Zabbix list of hosts ''' + + for usr_hst_key, 
usr_hst_val in usr_host.items(): + for zab_host in zab_hosts: + if usr_hst_key in zab_host and \ + zab_host[usr_hst_key] == str(usr_hst_val): + return True + + return False + +def hostlist_in_zabbix(zab_hosts, usr_hosts): + ''' Check whether user-provided list of hosts are already in + the Zabbix action ''' + + if len(zab_hosts) != len(usr_hosts): + return False + + for usr_host in usr_hosts: + if not host_in_zabbix(zab_hosts, usr_host): + return False + + return True + +# We are comparing two lists of dictionaries (the one stored on zabbix and the +# one the user is providing). For each type of operation, determine whether there +# is a difference between what is stored on zabbix and what the user is providing. +# If there is a difference, we take the user-provided data for what needs to +# be stored/updated into zabbix. def operation_differences(zabbix_ops, user_ops): '''Determine the differences from user and zabbix for operations''' @@ -87,37 +175,41 @@ def operation_differences(zabbix_ops, user_ops): rval = {} for zab, user in zip(zabbix_ops, user_ops): - for key, val in user.items(): - if key == 'opconditions': - if len(zab[key]) != len(val): - rval[key] = val - break - for z_cond, u_cond in zip(zab[key], user[key]): - if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \ - ['conditiontype', 'operator', 'value']]): - rval[key] = val - break - elif key == 'opmessage': - # Verify each passed param matches - for op_msg_key, op_msg_val in val.items(): - if zab[key][op_msg_key] != str(op_msg_val): - rval[key] = val - break - - elif key == 'opmessage_grp': - zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab[key]]) - usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in val]) - if usr_grp_ids != zab_grp_ids: - rval[key] = val - - elif key == 'opmessage_usr': - zab_usr_ids = set([usr['userid'] for usr in zab[key]]) - usr_ids = set([usr['userid'] for usr in val]) - if usr_ids != zab_usr_ids: - rval[key] = val - - elif zab[key] != str(val): - rval[key] = val + for oper in user.keys(): + if oper == 'opconditions' and opconditions_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage' and opmessage_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage_grp' and opmessage_grp_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opmessage_usr' and opmessage_usr_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + elif oper == 'opcommand' and opcommand_diff(zab[oper], \ + user[oper]): + rval[oper] = user[oper] + + # opcommand_grp can be treated just like opcommand_hst + # as opcommand_grp[] is just a list of groups + elif oper == 'opcommand_hst' or oper == 'opcommand_grp': + if not hostlist_in_zabbix(zab[oper], user[oper]): + rval[oper] = user[oper] + + # if it's any other type of operation than the ones tested above + # just do a direct compare + elif oper not in ['opconditions', 'opmessage', 'opmessage_grp', + 'opmessage_usr', 'opcommand', 'opcommand_hst', + 'opcommand_grp'] \ + and str(zab[oper]) != str(user[oper]): + rval[oper] = user[oper] + return rval def get_users(zapi, users): @@ -288,7 +380,7 @@ def get_condition_type(event_source, inc_condition): def get_operation_type(inc_operation): ''' determine the correct operation type''' o_types = {'send message': 0, - 'remote command': 1, + 'remote command': OPERATION_REMOTE_COMMAND, 'add host': 2, 'remove host': 3, 'add to host group': 4, @@ -301,7 +393,64 @@ def get_operation_type(inc_operation): return o_types[inc_operation] -def 
get_action_operations(zapi, inc_operations): +def get_opcommand_type(opcommand_type): + ''' determine the opcommand type ''' + oc_types = {'custom script': CUSTOM_SCRIPT_ACTION, + 'IPMI': IPMI_ACTION, + 'SSH': SSH_ACTION, + 'Telnet': TELNET_ACTION, + 'global script': GLOBAL_SCRIPT_ACTION, + } + + return oc_types[opcommand_type] + +def get_execute_on(execute_on): + ''' determine the execution target ''' + e_types = {'zabbix agent': EXECUTE_ON_ZABBIX_AGENT, + 'zabbix server': EXECUTE_ON_ZABBIX_SERVER, + } + + return e_types[execute_on] + +def action_remote_command(ansible_module, zapi, operation): + ''' Process remote command type of actions ''' + + if 'type' not in operation['opcommand']: + ansible_module.exit_json(failed=True, changed=False, state='unknown', + results="No Operation Type provided") + + operation['opcommand']['type'] = get_opcommand_type(operation['opcommand']['type']) + + if operation['opcommand']['type'] == CUSTOM_SCRIPT_ACTION: + + if 'execute_on' in operation['opcommand']: + operation['opcommand']['execute_on'] = get_execute_on(operation['opcommand']['execute_on']) + + # custom script still requires the target hosts/groups to be set + operation['opcommand_hst'] = [] + operation['opcommand_grp'] = [] + for usr_host in operation['target_hosts']: + if usr_host['target_type'] == 'zabbix server': + # 0 = target host local/current host + operation['opcommand_hst'].append({'hostid': 0}) + elif usr_host['target_type'] == 'group': + group_name = usr_host['target'] + gid = get_host_group_id_by_name(zapi, group_name) + operation['opcommand_grp'].append({'groupid': gid}) + elif usr_host['target_type'] == 'host': + host_name = usr_host['target'] + hid = get_host_id_by_name(zapi, host_name) + operation['opcommand_hst'].append({'hostid': hid}) + + # 'target_hosts' is just to make it easier to build zbx_actions + # not part of ZabbixAPI + del operation['target_hosts'] + else: + ansible_module.exit_json(failed=True, changed=False, state='unknown', + results="Unsupported remote command type") + + +def get_action_operations(ansible_module, zapi, inc_operations): '''Convert the operations into syntax for api''' for operation in inc_operations: operation['operationtype'] = get_operation_type(operation['operationtype']) @@ -315,9 +464,8 @@ def get_action_operations(zapi, inc_operations): else: operation['opmessage']['default_msg'] = 0 - # NOT supported for remote commands - elif operation['operationtype'] == 1: - continue + elif operation['operationtype'] == OPERATION_REMOTE_COMMAND: + action_remote_command(ansible_module, zapi, operation) # Handle Operation conditions: # Currently there is only 1 available which @@ -464,7 +612,8 @@ def main(): if state == 'present': conditions = get_action_conditions(zapi, module.params['event_source'], module.params['conditions_filter']) - operations = get_action_operations(zapi, module.params['operations']) + operations = get_action_operations(module, zapi, + module.params['operations']) params = {'name': module.params['name'], 'esc_period': module.params['escalation_time'], 'eventsource': get_event_source(module.params['event_source']), diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml index 47749389e..61344357a 100644 --- a/roles/lib_zabbix/tasks/create_template.yml +++ b/roles/lib_zabbix/tasks/create_template.yml @@ -57,6 +57,7 @@ expression: "{{ item.expression }}" priority: "{{ item.priority }}" url: "{{ item.url | default(None, True) }}" + status: "{{ item.status | default('', True) }}" with_items: 
template.ztriggers when: template.ztriggers is defined diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml index fda23e05e..0a0cf1fae 100644 --- a/roles/nickhammond.logrotate/tasks/main.yml +++ b/roles/nickhammond.logrotate/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: nickhammond.logrotate | Install logrotate - action: "{{ansible_pkg_mgr}} pkg=logrotate state=present" + action: "{{ ansible_pkg_mgr }} name=logrotate state=present" - name: nickhammond.logrotate | Setup logrotate.d scripts template: diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml index 2b99f8bcd..05c7a5f93 100644 --- a/roles/openshift_ansible_inventory/tasks/main.yml +++ b/roles/openshift_ansible_inventory/tasks/main.yml @@ -1,21 +1,10 @@ --- -- yum: - name: "{{ item }}" - state: present - when: ansible_pkg_mgr == "yum" - with_items: - - openshift-ansible-inventory - - openshift-ansible-inventory-aws - - openshift-ansible-inventory-gce - -- dnf: - name: "{{ item }}" - state: present - when: ansible_pkg_mgr == "dnf" +- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: - openshift-ansible-inventory - openshift-ansible-inventory-aws - openshift-ansible-inventory-gce + when: not openshift.common.is_containerized | bool - name: copy: diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml new file mode 100644 index 000000000..1e8f8b719 --- /dev/null +++ b/roles/openshift_cli/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: OpenShift CLI + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- { role: openshift_common } +- { role: docker } diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml new file mode 100644 index 000000000..2b53c9b8e --- /dev/null +++ b/roles/openshift_cli/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- openshift_facts: + role: common + local_facts: + deployment_type: "{{ openshift_deployment_type }}" + cli_image: "{{ osm_image | default(None) }}" + +- name: Install clients + yum: pkg={{ openshift.common.service_type }}-clients state=installed + when: not openshift.common.is_containerized | bool + +- name: Pull CLI Image + command: > + docker pull {{ openshift.common.cli_image }} + when: openshift.common.is_containerized | bool + +- name: Create /usr/local/bin/openshift cli wrapper + template: + src: openshift.j2 + dest: /usr/local/bin/openshift + mode: 0755 + when: openshift.common.is_containerized | bool + +- name: Create client symlinks + file: + path: "{{ item }}" + state: link + src: /usr/local/bin/openshift + with_items: + - /usr/local/bin/oadm + - /usr/local/bin/oc + - /usr/local/bin/kubectl + when: openshift.common.is_containerized | bool
\ No newline at end of file diff --git a/roles/openshift_cli/templates/openshift.j2 b/roles/openshift_cli/templates/openshift.j2 new file mode 100644 index 000000000..a7c148a22 --- /dev/null +++ b/roles/openshift_cli/templates/openshift.j2 @@ -0,0 +1,23 @@ +#!/bin/bash +if [ ! -d ~/.kube ]; then + mkdir -m 0700 ~/.kube +fi +cmd=`basename $0` +user=`id -u` +group=`id -g` + +>&2 echo """ +================================================================================ +ATTENTION: You are running ${cmd} via a wrapper around 'docker run {{ openshift.common.cli_image }}'. +This wrapper is intended only to be used to bootstrap an environment. Please +install client tools on another host once you have granted cluster-admin +privileges to a user. +{% if openshift.common.deployment_type in ['openshift-enterprise','atomic-enterprise'] %} +See https://docs.openshift.com/enterprise/latest/cli_reference/get_started_cli.html +{% else %} +See https://docs.openshift.org/latest/cli_reference/get_started_cli.html +{% endif %} +================================================================================= +""" + +docker run -i --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }} "${@}" diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 77301144c..c9f745ed2 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -22,6 +22,7 @@ cluster_id: "{{ openshift_cluster_id | default('default') }}" debug_level: "{{ openshift_debug_level | default(2) }}" hostname: "{{ openshift_hostname | default(None) }}" + install_examples: "{{ openshift_install_examples | default(True) }}" ip: "{{ openshift_ip | default(None) }}" public_hostname: "{{ openshift_public_hostname | default(None) }}" public_ip: "{{ openshift_public_ip | default(None) }}" @@ -33,11 +34,19 @@ use_nuage: "{{ openshift_use_nuage | default(None) }}" use_manageiq: "{{ openshift_use_manageiq | default(None) }}" +- name: Install the base package for versioning + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present" + when: not openshift.common.is_containerized | bool + +- name: Set version facts + openshift_facts: + # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the # hostname by default. - set_fact: set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}" - name: Set hostname - hostname: name={{ openshift.common.hostname }} + command: > + hostnamectl set-hostname {{ openshift.common.hostname }} when: openshift_set_hostname | default(set_hostname_default) | bool diff --git a/roles/openshift_docker/handlers/main.yml b/roles/openshift_docker/handlers/main.yml new file mode 100644 index 000000000..92a6c325f --- /dev/null +++ b/roles/openshift_docker/handlers/main.yml @@ -0,0 +1,6 @@ +--- + +- name: restart openshift_docker + service: + name: docker + state: restarted diff --git a/roles/openshift_docker/meta/main.yml b/roles/openshift_docker/meta/main.yml new file mode 100644 index 000000000..1e8f8b719 --- /dev/null +++ b/roles/openshift_docker/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: OpenShift Docker + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- { role: openshift_common } +- { role: docker } diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml new file mode 100644 index 000000000..5a285e773 --- /dev/null +++ b/roles/openshift_docker/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Set docker facts + openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + deployment_type: "{{ openshift_deployment_type }}" + docker_additional_registries: "{{ docker_additional_registries }}" + docker_insecure_registries: "{{ docker_insecure_registries }}" + docker_blocked_registries: "{{ docker_blocked_registries }}" + - role: node + local_facts: + portal_net: "{{ openshift_master_portal_net | default(None) }}" + docker_log_driver: "{{ lookup( 'oo_option' , 'docker_log_driver' ) | default('',True) }}" + docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' ) | default('',True) }}" + +- stat: path=/etc/sysconfig/docker + register: docker_check + +- name: Set registry params + lineinfile: + dest: /etc/sysconfig/docker + regexp: '^{{ item.reg_conf_var }}=.*$' + line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" + when: "'docker_additional_registries' in openshift.common and docker_check.stat.isreg" + with_items: + - reg_conf_var: ADD_REGISTRY + reg_fact_val: "{{ openshift.common.docker_additional_registries }}" + reg_flag: --add-registry + - reg_conf_var: BLOCK_REGISTRY + reg_fact_val: "{{ openshift.common.docker_blocked_registries }}" + reg_flag: --block-registry + - reg_conf_var: INSECURE_REGISTRY + reg_fact_val: "{{ openshift.common.docker_insecure_registries }}" + reg_flag: --insecure-registry + notify: + - restart openshift_docker + +# TODO: Enable secure registry when code available in origin +# TODO: perhaps move this to openshift_docker? +- name: Secure Registry and Logs Options + lineinfile: + dest: /etc/sysconfig/docker + regexp: '^OPTIONS=.*$' + line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \ + {% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %} \ + {% if openshift.node.docker_log_driver is defined %} --log-driver {{ openshift.node.docker_log_driver }} {% endif %} \ + {% if openshift.node.docker_log_options is defined %} {{ openshift.node.docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}} {% endif %} '" + when: docker_check.stat.isreg + notify: + - restart openshift_docker diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md index 7d8735a0a..6ddbe7017 100644 --- a/roles/openshift_examples/README.md +++ b/roles/openshift_examples/README.md @@ -11,6 +11,13 @@ ansible. Requirements ------------ +Facts +----- + +| Name | Default Value | Description | +|-----------------------------|---------------|----------------------------------------| +| openshift_install_examples | true | Whether to run this role and install the examples | + Role Variables -------------- @@ -32,7 +39,7 @@ Example Playbook TODO ---- Currently we use `oc create -f` against various files and we accept non zero return code as a success -if (and only iff) stderr also contains the string 'already exists'. 
This means that if one object in the file exists already +if (and only if) stderr also contains the string 'already exists'. This means that if one object in the file exists already but others fail to create you won't be aware of the failure. This also means that we do not currently support updating existing objects. diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index 0bc5d7750..6b9964aec 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -1,19 +1,19 @@ --- # By default install rhel and xpaas streams on enterprise installs -openshift_examples_load_centos: "{{ openshift_deployment_type not in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}" -openshift_examples_load_rhel: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}" +openshift_examples_load_centos: "{{ openshift_deployment_type == 'origin' }}" +openshift_examples_load_rhel: "{{ openshift_deployment_type != 'origin' }}" openshift_examples_load_db_templates: true -openshift_examples_load_xpaas: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}" +openshift_examples_load_xpaas: "{{ openshift_deployment_type != 'origin' }}" openshift_examples_load_quickstarts: true content_version: "{{ 'v1.1' if openshift.common.version_greater_than_3_1_or_1_1 else 'v1.0' }}" -examples_base: "/usr/share/openshift/examples" +examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized else '/usr/share/openshift' }}/examples" image_streams_base: "{{ examples_base }}/image-streams" centos_image_streams: "{{ image_streams_base}}/image-streams-centos7.json" rhel_image_streams: "{{ image_streams_base}}/image-streams-rhel7.json" db_templates_base: "{{ examples_base }}/db-templates" -xpaas_image_streams: "{{ examples_base }}/xpaas-streams/jboss-image-streams.json" +xpaas_image_streams: "{{ examples_base }}/xpaas-streams/" xpaas_templates_base: "{{ examples_base }}/xpaas-templates" quickstarts_base: "{{ examples_base }}/quickstart-templates" infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin" diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index 090fb9042..01ce6d819 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -5,7 +5,7 @@ # # This script should be run from openshift-ansible/roles/openshift_examples -XPAAS_VERSION=ose-v1.1.0 +XPAAS_VERSION=ose-v1.2.0-1 ORIGIN_VERSION=v1.1 EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION} find ${EXAMPLES_BASE} -name '*.json' -delete @@ -37,6 +37,7 @@ cp dancer-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ cp cakephp-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/ find application-templates-${XPAAS_VERSION}/ -name '*.json' ! 
-wholename '*secret*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \; +wget https://raw.githubusercontent.com/jboss-fuse/application-templates/master/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml cp ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-*.yaml ${EXAMPLES_BASE}/infrastructure-templates/enterprise/ diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.1/xpaas-streams/fis-image-streams.json new file mode 100644 index 000000000..ed0e94bed --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-streams/fis-image-streams.json @@ -0,0 +1,56 @@ +{ + "kind": "List", + "apiVersion": "v1", + "metadata": { + "name": "fis-image-streams", + "annotations": { + "description": "ImageStream definitions for JBoss Fuse Integration Services." + } + }, + "items": [ + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "fis-java-openshift" + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-fuse-6/fis-java-openshift", + "tags": [ + { + "name": "1.0", + "annotations": { + "description": "JBoss Fuse Integration Services 6.2.1 Java S2I images.", + "iconClass": "icon-jboss", + "tags": "builder,jboss-fuse,java,xpaas", + "supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2", + "version": "1.0" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "fis-karaf-openshift" + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-fuse-6/fis-karaf-openshift", + "tags": [ + { + "name": "1.0", + "annotations": { + "description": "JBoss Fuse Integration Services 6.2.1 Karaf S2I images.", + "iconClass": "icon-jboss", + "tags": "builder,jboss-fuse,java,karaf,xpaas", + "supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2", + "version": "1.0" + } + } + ] + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json index aaf5569ae..64b435205 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json @@ -28,6 +28,18 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.1" } + }, + { + "name": "1.2", + "annotations": { + "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.", + "iconClass": "icon-jboss", + "tags": "builder,tomcat,tomcat7,java,jboss,xpaas", + "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.2", + "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "sampleContextDir": "tomcat-websocket-chat", + "version": "1.2" + } } ] } @@ -52,6 +64,18 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.1" } + }, + { + "name": "1.2", + "annotations": { + "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.", + "iconClass": "icon-jboss", + "tags": "builder,tomcat,tomcat8,java,jboss,xpaas", + "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.2", + "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "sampleContextDir": "tomcat-websocket-chat", + "version": "1.2" + } } ] } @@ -77,6 +101,66 @@ "sampleRef": "6.4.x", "version": "1.1" } + }, + { + "name": "1.2", + "annotations": { + "description": 
"JBoss EAP 6.4 S2I images.", + "iconClass": "icon-jboss", + "tags": "builder,eap,javaee,java,jboss,xpaas", + "supports":"eap:6.4,javaee:6,java:8,xpaas:1.2", + "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", + "sampleContextDir": "kitchensink", + "sampleRef": "6.4.x", + "version": "1.2" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-decisionserver62-openshift" + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift", + "tags": [ + { + "name": "1.2", + "annotations": { + "description": "Decision Server 6.2 S2I images.", + "iconClass": "icon-jboss", + "tags": "builder,decisionserver,java,xpaas", + "supports":"decisionserver:6.2,java:8,xpaas:1.2", + "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "sampleContextDir": "decisionserver/hellorules", + "sampleRef": "master", + "version": "1.2" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-datagrid65-openshift" + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift", + "tags": [ + { + "name": "1.2", + "annotations": { + "description": "JBoss Data Grid 6.5 S2I images.", + "iconClass": "icon-jboss", + "tags": "datagrid,java,jboss,xpaas", + "supports":"datagrid:6.5,java:8,xpaas:1.2", + "version": "1.2" + } } ] } @@ -99,6 +183,16 @@ "supports":"amq:6.2,messaging,xpaas:1.1", "version": "1.1" } + }, + { + "name": "1.2", + "annotations": { + "description": "JBoss A-MQ 6.2 broker image.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "supports":"amq:6.2,messaging,xpaas:1.2", + "version": "1.2" + } } ] } diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json index 3fd04c28c..2b1680755 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json @@ -6,13 +6,13 @@ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "amq62-basic" }, "labels": { "template": "amq62-basic", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -54,20 +54,6 @@ "required": false }, { - "description": "User name for admin user. If left empty, it will be generated.", - "name": "AMQ_ADMIN_USERNAME", - "from": "user[a-zA-Z0-9]{3}", - "generate": "expression", - "required": true - }, - { - "description": "Password for admin user. If left empty, it will be generated.", - "name": "AMQ_ADMIN_PASSWORD", - "from": "[a-zA-Z0-9]{8}", - "generate": "expression", - "required": true - }, - { "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. 
If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.", "name": "AMQ_MESH_DISCOVERY_TYPE", "value": "kube", @@ -207,7 +193,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.1" + "name": "jboss-amq-62:1.2" } } }, @@ -239,12 +225,17 @@ "command": [ "/bin/bash", "-c", - "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + "/opt/amq/bin/readinessProbe.sh" ] } }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "amqp", "containerPort": 5672, "protocol": "TCP" @@ -287,14 +278,6 @@ "value": "${MQ_TOPICS}" }, { - "name": "AMQ_ADMIN_USERNAME", - "value": "${AMQ_ADMIN_USERNAME}" - }, - { - "name": "AMQ_ADMIN_PASSWORD", - "value": "${AMQ_ADMIN_PASSWORD}" - }, - { "name": "AMQ_MESH_DISCOVERY_TYPE", "value": "${AMQ_MESH_DISCOVERY_TYPE}" }, diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json index aa9e716cf..0755fef45 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json @@ -6,13 +6,13 @@ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "amq62-persistent-ssl" }, "labels": { "template": "amq62-persistent-ssl", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -60,20 +60,6 @@ "required": false }, { - "description": "User name for admin user. If left empty, it will be generated.", - "name": "AMQ_ADMIN_USERNAME", - "from": "user[a-zA-Z0-9]{3}", - "generate": "expression", - "required": true - }, - { - "description": "Password for admin user. 
If left empty, it will be generated.", - "name": "AMQ_ADMIN_PASSWORD", - "from": "[a-zA-Z0-9]{8}", - "generate": "expression", - "required": true - }, - { "description": "Name of a secret containing SSL related files", "name": "AMQ_SECRET", "value": "amq-app-secret", @@ -333,7 +319,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.1" + "name": "jboss-amq-62:1.2" } } }, @@ -377,12 +363,17 @@ "command": [ "/bin/bash", "-c", - "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + "/opt/amq/bin/readinessProbe.sh" ] } }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "amqp", "containerPort": 5672, "protocol": "TCP" @@ -445,14 +436,6 @@ "value": "${MQ_TOPICS}" }, { - "name": "AMQ_ADMIN_USERNAME", - "value": "${AMQ_ADMIN_USERNAME}" - }, - { - "name": "AMQ_ADMIN_PASSWORD", - "value": "${AMQ_ADMIN_PASSWORD}" - }, - { "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR", "value": "/etc/amq-secret-volume" }, diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json index 3a2db3ce9..a8b3d5714 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json @@ -6,13 +6,13 @@ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "amq62-persistent" }, "labels": { "template": "amq62-persistent", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -60,20 +60,6 @@ "required": false }, { - "description": "User name for admin user. If left empty, it will be generated.", - "name": "AMQ_ADMIN_USERNAME", - "from": "user[a-zA-Z0-9]{3}", - "generate": "expression", - "required": true - }, - { - "description": "Password for admin user. 
If left empty, it will be generated.", - "name": "AMQ_ADMIN_PASSWORD", - "from": "[a-zA-Z0-9]{8}", - "generate": "expression", - "required": true - }, - { "description": "The A-MQ storage usage limit", "name": "AMQ_STORAGE_USAGE_LIMIT", "value": "100 gb", @@ -207,7 +193,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.1" + "name": "jboss-amq-62:1.2" } } }, @@ -245,12 +231,17 @@ "command": [ "/bin/bash", "-c", - "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + "/opt/amq/bin/readinessProbe.sh" ] } }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "amqp", "containerPort": 5672, "protocol": "TCP" @@ -293,14 +284,6 @@ "value": "${MQ_TOPICS}" }, { - "name": "AMQ_ADMIN_USERNAME", - "value": "${AMQ_ADMIN_USERNAME}" - }, - { - "name": "AMQ_ADMIN_PASSWORD", - "value": "${AMQ_ADMIN_PASSWORD}" - }, - { "name": "AMQ_STORAGE_USAGE_LIMIT", "value": "${AMQ_STORAGE_USAGE_LIMIT}" } diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json index f61fb24c2..ced360b09 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json @@ -6,13 +6,13 @@ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.", "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "amq62-ssl" }, "labels": { "template": "amq62-ssl", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -54,20 +54,6 @@ "required": false }, { - "description": "User name for admin user. If left empty, it will be generated.", - "name": "AMQ_ADMIN_USERNAME", - "from": "user[a-zA-Z0-9]{3}", - "generate": "expression", - "required": true - }, - { - "description": "Password for admin user. 
If left empty, it will be generated.", - "name": "AMQ_ADMIN_PASSWORD", - "from": "[a-zA-Z0-9]{8}", - "generate": "expression", - "required": true - }, - { "description": "Name of a secret containing SSL related files", "name": "AMQ_SECRET", "value": "amq-app-secret", @@ -333,7 +319,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.1" + "name": "jboss-amq-62:1.2" } } }, @@ -373,12 +359,17 @@ "command": [ "/bin/bash", "-c", - "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + "/opt/amq/bin/readinessProbe.sh" ] } }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "amqp", "containerPort": 5672, "protocol": "TCP" @@ -441,14 +432,6 @@ "value": "${MQ_TOPICS}" }, { - "name": "AMQ_ADMIN_USERNAME", - "value": "${AMQ_ADMIN_USERNAME}" - }, - { - "name": "AMQ_ADMIN_PASSWORD", - "value": "${AMQ_ADMIN_PASSWORD}" - }, - { "name": "AMQ_MESH_DISCOVERY_TYPE", "value": "${AMQ_MESH_DISCOVERY_TYPE}" }, diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-basic.json new file mode 100644 index 000000000..56e76016f --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-basic.json @@ -0,0 +1,332 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 6.5 applications.", + "tags": "datagrid,jboss,xpaas", + "version": "1.2.0" + }, + "name": "datagrid65-basic" + }, + "labels": { + "template": "datagrid65-basic", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "description": "Password for JDG user.", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "description": "Comma-separated list of caches to configure. 
By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." 
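[Editor's note] The datagrid65-basic template being added here wires the HTTP (8080), memcached (11211) and Hot Rod (11333) services to a single DeploymentConfig. As a minimal sketch of how such a template is typically instantiated — assuming a logged-in, reasonably recent `oc` client and the template JSON saved locally; the parameter values are illustrative only:

```sh
# Process the template locally, overriding a few of its declared
# parameters, and create the resulting objects in the current project.
oc process -f datagrid65-basic.json \
    -p APPLICATION_NAME=mygrid \
    -p CACHE_NAMES=sessions,profiles \
    | oc create -f -
```

If the template has instead been registered in the `openshift` namespace (as these example files are intended to be), `oc new-app --template=datagrid65-basic -p APPLICATION_NAME=mygrid` achieves the same result.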
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid65-openshift:1.2" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid65-openshift", + "imagePullPolicy": "Always", + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11222, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-https.json new file mode 100644 index 000000000..033e70063 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-https.json @@ -0,0 +1,501 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 6.5 applications.", + "tags": "datagrid,jboss,xpaas", + "version": "1.2.0" + }, + "name": "datagrid65-https" + }, + "labels": { + "template": "datagrid65-https", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "description": "Custom hostname 
for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "description": "Password for JDG user.", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "description": "Comma-separated list of caches to configure. 
By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." 
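[Editor's note] The SSL-enabled variants expect their keystores to arrive via OpenShift secrets (HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET, both defaulting to `datagrid-app-secret`) and run under a `datagrid-service-account` service account. A hedged sketch of the one-time setup; aliases, passwords, and DN values below are placeholders, not values mandated by the template:

```sh
# Self-signed server certificate for the HTTPS endpoint.
keytool -genkeypair -alias datagrid -keyalg RSA \
    -keystore keystore.jks -storepass changeit -dname "CN=mygrid"

# JCEKS keystore for JGroups traffic encryption; note the store type
# matches the template's jgroups.jceks default.
keytool -genseckey -alias jgroups -storetype JCEKS \
    -keystore jgroups.jceks -storepass changeit

# Service account referenced by the DeploymentConfig, plus one secret
# holding both keystores under the file names the template expects.
oc create serviceaccount datagrid-service-account
oc create secret generic datagrid-app-secret \
    --from-file=keystore.jks --from-file=jgroups.jceks
```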
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid65-openshift:1.2" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid65-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11222, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": 
"${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-mysql-persistent.json new file mode 100644 index 000000000..05bc7c236 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-mysql-persistent.json @@ -0,0 +1,779 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 6.5 and MySQL applications with persistent storage.", + "tags": "datagrid,jboss,xpaas", + "version": "1.2.0" + }, + "name": "datagrid65-mysql-persistent" + }, + "labels": { + "template": "datagrid65-mysql-persistent", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "description": "Password for JDG user.", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mysql", + "name": "DB_JNDI", + "value": "java:/jboss/datasources/mysql", + "required": false + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false + }, + { + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS", + "required": false + }, + { + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false + }, + { + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false + }, + { + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO", + "required": false + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "description": "Comma-separated list of caches to configure. 
By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." 
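[Editor's note] This mysql-persistent variant layers a JDBC cache store onto the grid: DEFAULT_JDBC_STORE_DATASOURCE and MEMCACHED_JDBC_STORE_DATASOURCE (set further down in the DeploymentConfig) both point at the ${DB_JNDI} datasource, so cache entries are written through to MySQL, which in turn sits on the persistent volume claim sized by VOLUME_CAPACITY. A sketch of instantiating it with illustrative values — the generated DB_USERNAME/DB_PASSWORD defaults can simply be left alone:

```sh
oc process -f datagrid65-mysql-persistent.json \
    -p APPLICATION_NAME=mygrid \
    -p DB_DATABASE=datagrid \
    -p VOLUME_CAPACITY=1Gi \
    | oc create -f -
```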
+ } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid65-openshift:1.2" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid65-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11222, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + 
"name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "DEFAULT_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "DEFAULT_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "MEMCACHED_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "MEMCACHED_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "mysql:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "mysql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mysql/data", + "name": "${APPLICATION_NAME}-mysql-pvol" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": 
"${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + "name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mysql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mysql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mysql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-mysql.json new file mode 100644 index 000000000..1856c8dc2 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-mysql.json @@ -0,0 +1,739 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 6.5 and MySQL applications.", + "tags": "datagrid,jboss,xpaas", + "version": "1.2.0" + }, + "name": "datagrid65-mysql" + }, + "labels": { + "template": "datagrid65-mysql", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "description": "Password for JDG user.", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mysql", + "name": "DB_JNDI", + "value": "java:/jboss/datasources/mysql", + "required": false + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "description": "Sets how the table names are stored and compared.", + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false + }, + { + "description": "The maximum permitted number of simultaneous client connections.", + "name": "MYSQL_MAX_CONNECTIONS", + "required": false + }, + { + "description": "The minimum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false + }, + { + "description": "The maximum length of the word to be included in a FULLTEXT index.", + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false + }, + { + "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", + "name": "MYSQL_AIO", + "required": false + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "description": "Comma-separated list of caches to configure. 
By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 3306, + "targetPort": 3306 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." 
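[Editor's note] Unlike the -persistent variant above, this template's MySQL pod mounts no volume, so the database — and with it the JDBC cache store — is ephemeral and lost on redeploy. The grid itself still clusters via the OPENSHIFT_KUBE_PING_LABELS/OPENSHIFT_KUBE_PING_NAMESPACE variables, so scaling out is straightforward (application name illustrative):

```sh
# Add JDG replicas; JGroups KUBE_PING discovers peers by the
# application=<name> label within the pod's own namespace.
oc scale dc/datagrid-app --replicas=3
oc get pods -l application=datagrid-app
```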
+ } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid65-openshift:1.2" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid65-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11222, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + 
"name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-mysql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "DEFAULT_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "DEFAULT_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "MEMCACHED_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "MEMCACHED_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-mysql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "mysql:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-mysql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-mysql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-mysql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-mysql", + "image": "mysql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "MYSQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "MYSQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "MYSQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}" + }, + { + "name": "MYSQL_MAX_CONNECTIONS", + "value": "${MYSQL_MAX_CONNECTIONS}" + }, + { + "name": "MYSQL_FT_MIN_WORD_LEN", + "value": "${MYSQL_FT_MIN_WORD_LEN}" + }, + { + 
"name": "MYSQL_FT_MAX_WORD_LEN", + "value": "${MYSQL_FT_MAX_WORD_LEN}" + }, + { + "name": "MYSQL_AIO", + "value": "${MYSQL_AIO}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-postgresql-persistent.json new file mode 100644 index 000000000..10d0f77ce --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-postgresql-persistent.json @@ -0,0 +1,752 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 6.5 and PostgreSQL applications with persistent storage.", + "tags": "datagrid,jboss,xpaas", + "version": "1.2.0" + }, + "name": "datagrid65-postgresql-persistent" + }, + "labels": { + "template": "datagrid65-postgresql-persistent", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "description": "Password for JDG user.", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql", + "name": "DB_JNDI", + "value": "java:jboss/datasources/postgresql", + "required": false + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "description": "The maximum number of client connections allowed. 
This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false + }, + { + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "description": "", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port." 
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." 
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid65-openshift:1.2" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid65-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11222, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "DEFAULT_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "DEFAULT_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "MEMCACHED_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "MEMCACHED_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": 
"application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "postgresql:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "postgresql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-postgresql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-postgresql-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git 
a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-postgresql.json new file mode 100644 index 000000000..9dd378f92 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/datagrid65-postgresql.json @@ -0,0 +1,712 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "iconClass": "icon-jboss", + "description": "Application template for JDG 6.5 and PostgreSQL applications.", + "tags": "datagrid,jboss,xpaas", + "version": "1.2.0" + }, + "name": "datagrid65-postgresql" + }, + "labels": { + "template": "datagrid65-postgresql", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "datagrid-app", + "required": true + }, + { + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "description": "User name for JDG user.", + "name": "USERNAME", + "value": "", + "required": false + }, + { + "description": "Password for JDG user.", + "name": "PASSWORD", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "datagrid-app-secret", + "required": true + }, + { + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "", + "required": false + }, + { + "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql", + "name": "DB_JNDI", + "value": "java:jboss/datasources/postgresql", + "required": false + }, + { + "description": "Database name", + "name": "DB_DATABASE", + "value": "root", + "required": true + }, + { + "description": "Database user name", + "name": "DB_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "description": "Database user password", + "name": "DB_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Sets xa-pool/min-pool-size for the configured datasource.", + "name": "DB_MIN_POOL_SIZE", + "required": false + }, + { + "description": "Sets xa-pool/max-pool-size for the configured datasource.", + "name": "DB_MAX_POOL_SIZE", + "required": false + }, + { + "description": "Sets transaction-isolation for the configured datasource.", + "name": "DB_TX_ISOLATION", + "required": false + }, + { + "description": "The maximum number of client connections allowed.
This also sets the maximum number of prepared transactions.", + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false + }, + { + "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + }, + { + "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')", + "name": "INFINISPAN_CONNECTORS", + "value": "hotrod,memcached,rest", + "required": false + }, + { + "description": "Comma-separated list of caches to configure. By default, a distributed-cache with a mode of SYNC will be configured for each entry.", + "name": "CACHE_NAMES", + "value": "", + "required": false + }, + { + "description": "Whether to require client certificate authentication.", + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "", + "required": false + }, + { + "description": "The name of the cache to expose through this memcached connector (defaults to 'default')", + "name": "MEMCACHED_CACHE", + "value": "default", + "required": false + }, + { + "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint", + "name": "REST_SECURITY_DOMAIN", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "datagrid-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port."
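
Both JDG variants mount keystores from the secrets named by `HTTPS_SECRET` and `JGROUPS_ENCRYPT_SECRET` and run under the `datagrid-service-account` service account, so those objects must exist before the first deployment can start. A rough sketch of that preparation, assuming self-signed keystores are acceptable; every alias, CN, and password below is a placeholder:

```sh
# Hypothetical pre-deployment setup for the default datagrid-app-secret.
# Key names in the secret must match HTTPS_KEYSTORE / JGROUPS_ENCRYPT_KEYSTORE.
keytool -genkeypair -alias jboss -keyalg RSA -keystore keystore.jks \
    -storepass mykeystorepass -dname "CN=datagrid-app.example.com"
keytool -genseckey -alias jgroups -storetype JCEKS -keystore jgroups.jceks \
    -storepass mykeystorepass
echo '{"apiVersion": "v1", "kind": "ServiceAccount", "metadata": {"name": "datagrid-service-account"}}' \
    | oc create -f -
oc secrets new datagrid-app-secret keystore.jks jgroups.jceks
```
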
+ } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11211, + "targetPort": 11211 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-memcached", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Memcached service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 11333, + "targetPort": 11333 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-hotrod", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Hot Rod service for clustered applications." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The database server's port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." 
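
Each route pair here follows the same pattern: the HTTP route proxies to the plain service, while the HTTPS route uses passthrough termination, so TLS is terminated by the application server itself against the keystore from `HTTPS_SECRET` rather than at the router. Since passthrough routing relies on SNI to pick the backend, one way to verify the served certificate end to end (the hostname is a placeholder):

```sh
# Hypothetical certificate check through a passthrough route; the router
# needs the SNI servername to select the backend.
openssl s_client -connect secure-datagrid-app-myproject.example.com:443 \
    -servername secure-datagrid-app-myproject.example.com </dev/null \
    | openssl x509 -noout -subject -dates
```
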
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-datagrid65-openshift:1.2" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "datagrid-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "jboss-datagrid65-openshift", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "datagrid-keystore-volume", + "mountPath": "/etc/datagrid-secret-volume", + "readOnly": true + }, + { + "name": "datagrid-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/datagrid/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + }, + { + "name": "memcached", + "containerPort": 11211, + "protocol": "TCP" + }, + { + "name": "hotrod", + "containerPort": 11222, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "USERNAME", + "value": "${USERNAME}" + }, + { + "name": "PASSWORD", + "value": "${PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/datagrid-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "DB_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_JNDI", + "value": "${DB_JNDI}" + }, + { + "name": "DB_USERNAME", + "value": "${DB_USERNAME}" + }, + { + "name": "DB_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "DB_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "TX_DATABASE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-postgresql=DB" + }, + { + "name": "DB_MIN_POOL_SIZE", + "value": "${DB_MIN_POOL_SIZE}" + }, + { + "name": "DB_MAX_POOL_SIZE", + "value": "${DB_MAX_POOL_SIZE}" + }, + { + "name": "DB_TX_ISOLATION", + "value": "${DB_TX_ISOLATION}" + }, + { + "name": "DEFAULT_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "DEFAULT_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "MEMCACHED_JDBC_STORE_TYPE", + "value": "string" + }, + { + "name": "MEMCACHED_JDBC_STORE_DATASOURCE", + "value": "${DB_JNDI}" + }, + { + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": 
"application=${APPLICATION_NAME}" + }, + { + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "INFINISPAN_CONNECTORS", + "value": "${INFINISPAN_CONNECTORS}" + }, + { + "name": "CACHE_NAMES", + "value": "${CACHE_NAMES}" + }, + { + "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH", + "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}" + }, + { + "name": "HOTROD_SERVICE_NAME", + "value": "${APPLICATION_NAME}-hotrod" + }, + { + "name": "MEMCACHED_CACHE", + "value": "${MEMCACHED_CACHE}" + }, + { + "name": "REST_SECURITY_DOMAIN", + "value": "${REST_SECURITY_DOMAIN}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "datagrid-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "datagrid-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "postgresql:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-postgresql", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-postgresql", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "${APPLICATION_NAME}-postgresql", + "image": "postgresql", + "imagePullPolicy": "Always", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "${DB_USERNAME}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${DB_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${DB_DATABASE}" + }, + { + "name": "POSTGRESQL_MAX_CONNECTIONS", + "value": "${POSTGRESQL_MAX_CONNECTIONS}" + }, + { + "name": "POSTGRESQL_SHARED_BUFFERS", + "value": "${POSTGRESQL_SHARED_BUFFERS}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-amq-s2i.json new file mode 100644 index 000000000..0c82eaa61 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-amq-s2i.json @@ -0,0 +1,684 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for BRMS Realtime Decision Server 6 A-MQ applications built using S2I.", + "iconClass": "icon-jboss", + "tags": 
"decisionserver,amq,java,messaging,jboss,xpaas", + "version": "1.2.0" + }, + "name": "decisionserver62-amq-s2i" + }, + "labels": { + "template": "decisionserver62-amq-s2i", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2", + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final", + "required": false + }, + { + "description": "The user name to access the KIE Server REST or JMS interface.", + "name": "KIE_SERVER_USER", + "value": "kieserver", + "required": false + }, + { + "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).", + "name": "KIE_SERVER_PASSWORD", + "from": "[a-zA-Z]{6}[0-9]{1}!", + "generate": "expression", + "required": false + }, + { + "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.", + "name": "KIE_SERVER_DOMAIN", + "value": "other", + "required": false + }, + { + "description": "JNDI name of response queue for JMS.", + "name": "KIE_SERVER_JMS_QUEUES_RESPONSE", + "value": "queue/KIE.SERVER.RESPONSE", + "required": false + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "kie-app", + "required": true + }, + { + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true + }, + { + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.2", + "required": false + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "decisionserver/hellorules", + "required": false + }, + { + "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA", + "name": "MQ_JNDI", + "value": "java:/JmsXA", + "required": false + }, + { + "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.", + "name": "MQ_PROTOCOL", + "value": "openwire", + "required": false + }, + { + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", + "name": "MQ_QUEUES", + "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE", + "required": false + }, + { + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. 
Also, they will be made accessible as JNDI resources in EAP.", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "decisionserver-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "jboss", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "mykeystorepass", + "required": false + }, + { + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": false + }, + { + "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": false + }, + { + "description": "User name for broker admin. If left empty, it will be generated.", + "name": "AMQ_ADMIN_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "description": "Password for broker admin. If left empty, it will be generated.", + "name": "AMQ_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's HTTPS port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's OpenWire port." 
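
Kubernetes publishes every service to pods as `<SERVICE_NAME>_SERVICE_HOST`/`_PORT` environment variables, and `MQ_SERVICE_PREFIX_MAPPING` (set to `${APPLICATION_NAME}-amq=MQ` in the deployment below) tells the EAP-based image which service's variables to read when wiring the broker connection. For the default `kie-app` application name the pod would therefore see something like the following; the address is illustrative:

```sh
# Inside the KIE server pod: broker discovery via the standard
# Kubernetes service environment variables for kie-app-amq-tcp.
echo "$KIE_APP_AMQ_TCP_SERVICE_HOST"   # e.g. 172.30.17.42
echo "$KIE_APP_AMQ_TCP_SERVICE_PORT"   # 61616 (OpenWire)
```
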
+ } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTPS service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "${KIE_CONTAINER_DEPLOYMENT}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-decisionserver62-openshift:1.2" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "decisionserver-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "decisionserver-keystore-volume", + "mountPath": "/etc/decisionserver-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + 
"name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "${KIE_CONTAINER_DEPLOYMENT}" + }, + { + "name": "KIE_SERVER_USER", + "value": "${KIE_SERVER_USER}" + }, + { + "name": "KIE_SERVER_PASSWORD", + "value": "${KIE_SERVER_PASSWORD}" + }, + { + "name": "KIE_SERVER_DOMAIN", + "value": "${KIE_SERVER_DOMAIN}" + }, + { + "name": "KIE_SERVER_JMS_QUEUES_RESPONSE", + "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}" + }, + { + "name": "MQ_SERVICE_PREFIX_MAPPING", + "value": "${APPLICATION_NAME}-amq=MQ" + }, + { + "name": "MQ_JNDI", + "value": "${MQ_JNDI}" + }, + { + "name": "MQ_USERNAME", + "value": "${MQ_USERNAME}" + }, + { + "name": "MQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "MQ_PROTOCOL", + "value": "tcp" + }, + { + "name": "MQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "MQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/decisionserver-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + } + ] + } + ], + "volumes": [ + { + "name": "decisionserver-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.2" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "jboss-amq-62", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/amq/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "amqp-ssl", + "containerPort": 5671, + "protocol": "TCP" + }, + { + "name": "mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "stomp-ssl", + "containerPort": 61612, + "protocol": "TCP" + }, + { + "name": "tcp", + "containerPort": 61616, + "protocol": "TCP" + }, + { + "name": "tcp-ssl", + "containerPort": 61617, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_TRANSPORTS", + "value": "${MQ_PROTOCOL}" + }, + { + "name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "AMQ_ADMIN_USERNAME", + "value": "${AMQ_ADMIN_USERNAME}" + }, + { + "name": "AMQ_ADMIN_PASSWORD", + "value": "${AMQ_ADMIN_PASSWORD}" + } + ] + } + ] + } + } + } + } 
+ ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-basic-s2i.json new file mode 100644 index 000000000..097720375 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-basic-s2i.json @@ -0,0 +1,344 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for BRMS Realtime Decision Server 6 applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "decisionserver,java,jboss,xpaas", + "version": "1.2.0" + }, + "name": "decisionserver62-basic-s2i" + }, + "labels": { + "template": "decisionserver62-basic-s2i", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2", + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final", + "required": false + }, + { + "description": "The user name to access the KIE Server REST or JMS interface.", + "name": "KIE_SERVER_USER", + "value": "kieserver", + "required": false + }, + { + "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).", + "name": "KIE_SERVER_PASSWORD", + "from": "[a-zA-Z]{6}[0-9]{1}!", + "generate": "expression", + "required": false + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "kie-app", + "required": true + }, + { + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true + }, + { + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.2", + "required": false + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "decisionserver/hellorules", + "required": false + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "", + "required": false + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "", + "required": false + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "${KIE_CONTAINER_DEPLOYMENT}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-decisionserver62-openshift:1.2" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + 
], + "env": [ + { + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "${KIE_CONTAINER_DEPLOYMENT}" + }, + { + "name": "KIE_SERVER_USER", + "value": "${KIE_SERVER_USER}" + }, + { + "name": "KIE_SERVER_PASSWORD", + "value": "${KIE_SERVER_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + "name": "HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-https-s2i.json new file mode 100644 index 000000000..d0505fc5f --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-https-s2i.json @@ -0,0 +1,478 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for BRMS Realtime Decision Server 6 HTTPS applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "decisionserver,java,jboss,xpaas", + "version": "1.2.0" + }, + "name": "decisionserver62-https-s2i" + }, + "labels": { + "template": "decisionserver62-https-s2i", + "xpaas": "1.2.0" + }, + "parameters": [ + { + "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2", + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final", + "required": false + }, + { + "description": "The protocol to access the KIE Server REST interface.", + "name": "KIE_SERVER_PROTOCOL", + "value": "https", + "required": false + }, + { + "description": "The port to access the KIE Server REST interface.", + "name": "KIE_SERVER_PORT", + "value": "8443", + "required": false + }, + { + "description": "The user name to access the KIE Server REST or JMS interface.", + "name": "KIE_SERVER_USER", + "value": "kieserver", + "required": false + }, + { + "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).", + "name": "KIE_SERVER_PASSWORD", + "from": "[a-zA-Z]{6}[0-9]{1}!", + "generate": "expression", + "required": false + }, + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "kie-app", + "required": true + }, + { + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", + "value": "", + "required": false + }, + { + "description": "Git source URI for application", + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true + }, + { + "description": "Git branch/tag reference", + "name": "SOURCE_REPOSITORY_REF", + "value": "1.2", + "required": false + }, + { + "description": "Path within Git project to build; empty for root project directory.", + "name": "CONTEXT_DIR", + "value": "decisionserver/hellorules", + "required": false + }, + { + "description": "Queue names", + "name": "HORNETQ_QUEUES", + "value": "", + "required": false + }, + { + "description": "Topic names", + "name": "HORNETQ_TOPICS", + "value": "", + "required": false + }, + { + "description": "The name of the secret containing the keystore file", + "name": "HTTPS_SECRET", + "value": "decisionserver-app-secret", + "required": true + }, + { + "description": "The name of the keystore file within the secret", + "name": "HTTPS_KEYSTORE", + "value": "keystore.jks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "HTTPS_NAME", + "value": "jboss", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "HTTPS_PASSWORD", + "value": "mykeystorepass", + "required": false + }, + { + "description": "HornetQ cluster admin password", + "name": "HORNETQ_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Generic build trigger secret", + "name": "GENERIC_WEBHOOK_SECRET", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's http port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8443, + "targetPort": 8443 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + } + }, + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The web server's https port." + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-http", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's http service." 
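
Once the routes are admitted, the decision server's REST interface is reachable through the configured hostnames and guarded by the `KIE_SERVER_USER`/`KIE_SERVER_PASSWORD` credentials. A minimal smoke test against the server-information endpoint, assuming the HTTPS route resolved to the placeholder hostname below; `-k` tolerates a self-signed certificate behind the passthrough route:

```sh
# Hypothetical KIE server smoke test; hostname and password are placeholders.
curl -k -u "kieserver:<password>" \
  "https://secure-kie-app-myproject.example.com/kie-server/services/rest/server"
```
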
+ } + }, + "spec": { + "host": "${HOSTNAME_HTTP}", + "to": { + "name": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "id": "${APPLICATION_NAME}-https", + "metadata": { + "name": "secure-${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's https service." + } + }, + "spec": { + "host": "${HOSTNAME_HTTPS}", + "to": { + "name": "secure-${APPLICATION_NAME}" + }, + "tls": { + "termination": "passthrough" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "env": [ + { + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "${KIE_CONTAINER_DEPLOYMENT}" + } + ], + "forcePull": true, + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-decisionserver62-openshift:1.2" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${APPLICATION_NAME}:latest" + } + }, + "triggers": [ + { + "type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + }, + { + "type": "ImageChange", + "imageChange": {} + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}" + ], + "from": { + "kind": "ImageStream", + "name": "${APPLICATION_NAME}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "serviceAccount": "decisionserver-service-account", + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}", + "image": "${APPLICATION_NAME}", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "name": "decisionserver-keystore-volume", + "mountPath": "/etc/decisionserver-secret-volume", + "readOnly": true + } + ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/readinessProbe.sh" + ] + } + }, + "ports": [ + { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { + "name": "http", + "containerPort": 8080, + "protocol": "TCP" + }, + { + "name": "https", + "containerPort": 8443, + "protocol": "TCP" + }, + { + "name": "ping", + "containerPort": 8888, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "KIE_CONTAINER_DEPLOYMENT", + "value": "${KIE_CONTAINER_DEPLOYMENT}" + }, + { + "name": "KIE_SERVER_PROTOCOL", + 
"value": "${KIE_SERVER_PROTOCOL}" + }, + { + "name": "KIE_SERVER_PORT", + "value": "${KIE_SERVER_PORT}" + }, + { + "name": "KIE_SERVER_USER", + "value": "${KIE_SERVER_USER}" + }, + { + "name": "KIE_SERVER_PASSWORD", + "value": "${KIE_SERVER_PASSWORD}" + }, + { + "name": "HTTPS_KEYSTORE_DIR", + "value": "/etc/decisionserver-secret-volume" + }, + { + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" + }, + { + "name": "HORNETQ_CLUSTER_PASSWORD", + "value": "${HORNETQ_CLUSTER_PASSWORD}" + }, + { + "name": "HORNETQ_QUEUES", + "value": "${HORNETQ_QUEUES}" + }, + { + "name": "HORNETQ_TOPICS", + "value": "${HORNETQ_TOPICS}" + } + ] + } + ], + "volumes": [ + { + "name": "decisionserver-keystore-volume", + "secret": { + "secretName": "${HTTPS_SECRET}" + } + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json index 2fc3b5b25..4b38dade3 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json @@ -6,13 +6,13 @@ "description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.", "iconClass": "icon-jboss", "tags": "eap,amq,javaee,java,messaging,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-amq-persistent-s2i" }, "labels": { "template": "eap64-amq-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,21 +22,27 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, { "description": "Git source URI for application", "name": "SOURCE_REPOSITORY_URL", - "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", "required": true }, { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "6.4.x", "required": false }, { @@ -77,25 +83,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -146,6 +152,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -235,7 +272,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -255,7 +292,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -299,7 +336,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -388,8 +425,22 @@ "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -401,6 +452,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -458,20 +514,44 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": 
"${HTTPS_PASSWORD}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -480,7 +560,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -512,7 +598,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.1" + "name": "jboss-amq-62:1.2" } } }, @@ -544,7 +630,7 @@ "command": [ "/bin/bash", "-c", - "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + "/opt/amq/bin/readinessProbe.sh" ] } }, diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json index a420bb1ea..d321af9c9 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json @@ -6,13 +6,13 @@ "description": "Application template for EAP 6 A-MQ applications built using S2I.", "iconClass": "icon-jboss", "tags": "eap,amq,javaee,java,messaging,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-amq-s2i" }, "labels": { "template": "eap64-amq-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,21 +22,27 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, { "description": "Git source URI for application", "name": "SOURCE_REPOSITORY_URL", - "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", "required": true }, { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "6.4.x", "required": false }, { @@ -71,25 +77,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -140,6 +146,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -229,7 +266,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -249,7 +286,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -293,7 +330,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -382,8 +419,22 @@ "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -395,6 +446,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -452,20 +508,44 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" + }, + { + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" + }, + { + "name": "HTTPS_PASSWORD", + "value": 
"${HTTPS_PASSWORD}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -474,7 +554,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -506,7 +592,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-amq-62:1.1" + "name": "jboss-amq-62:1.2" } } }, @@ -538,7 +624,7 @@ "command": [ "/bin/bash", "-c", - "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + "/opt/amq/bin/readinessProbe.sh" ] } }, diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-basic-s2i.json index 3f90eb8be..2e3849e2a 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-basic-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-jboss", "description": "Application template for EAP 6 applications built using S2I.", "tags": "eap,javaee,java,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-basic-s2i" }, "labels": { "template": "eap64-basic-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,8 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. 
Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", "value": "", "required": false }, @@ -83,6 +83,13 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -124,7 +131,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -165,7 +172,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -248,6 +255,15 @@ "name": "${APPLICATION_NAME}", "image": "${APPLICATION_NAME}", "imagePullPolicy": "Always", + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -259,6 +275,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -293,6 +314,10 @@ { "name": "HORNETQ_TOPICS", "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -302,4 +327,4 @@ } } ] -}
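
Note: the readiness check the two A-MQ templates above previously inlined polls the broker's Health MBean through the Jolokia REST bridge bundled with the hawtio console; the replacement, `/opt/amq/bin/readinessProbe.sh`, is a script shipped in the image that presumably wraps the same kind of check. A sketch of the original inline probe, assuming the broker console listens on localhost:8161:

```sh
# Ask the broker's Health MBean for its status via the Jolokia REST
# bridge, and exit 0 only when the broker reports "Good"; a nonzero
# exit marks the pod not-ready.
curl -s -L -u "${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD}" \
  'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' \
  | grep -q '"CurrentStatus" *: *"Good"'
```
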
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-https-s2i.json index 220d2f5b9..54514cb6b 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-https-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-https-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-jboss", "description": "Application template for EAP 6 applications built using S2I.", "tags": "eap,javaee,java,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-https-s2i" }, "labels": { "template": "eap64-https-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -59,25 +65,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": true }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -107,6 +113,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -172,7 +209,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -192,7 +229,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -236,7 +273,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -325,8 +362,22 @@ "name": "eap-keystore-volume", "mountPath": 
"/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -338,6 +389,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -367,20 +423,20 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" }, { "name": "HORNETQ_CLUSTER_PASSWORD", @@ -393,6 +449,30 @@ { "name": "HORNETQ_TOPICS", "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -401,7 +481,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -410,4 +496,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-persistent-s2i.json index a1a3a9f2c..2c0f21ae3 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-persistent-s2i.json @@ -6,13 +6,13 @@ "description": "Application template for EAP 6 MongDB applications with persistent storage built using S2I.", "iconClass": "icon-jboss", "tags": "eap,mongodb,javaee,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-mongodb-persistent-s2i" }, "labels": { "template": "eap64-mongodb-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -77,25 +83,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -176,6 +182,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -265,7 +302,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -285,7 +322,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": 
"secure-${APPLICATION_NAME}" }, @@ -329,7 +366,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -418,8 +455,22 @@ "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -431,6 +482,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -496,20 +552,20 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" }, { "name": "HORNETQ_CLUSTER_PASSWORD", @@ -522,6 +578,30 @@ { "name": "HORNETQ_TOPICS", "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -530,7 +610,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -666,4 +752,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-s2i.json index dfd1443ed..6f604d29e 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-s2i.json @@ -6,13 +6,13 @@ "description": "Application template for EAP 6 MongDB applications built using S2I.", "iconClass": "icon-jboss", "tags": "eap,mongodb,javaee,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-mongodb-s2i" }, "labels": { "template": "eap64-mongodb-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -71,25 +77,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -170,6 +176,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -259,7 +296,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -279,7 +316,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -323,7 +360,7 @@ "from": { "kind": "ImageStreamTag", 
"namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -412,8 +449,22 @@ "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -425,6 +476,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -490,20 +546,20 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" }, { "name": "HORNETQ_CLUSTER_PASSWORD", @@ -516,6 +572,30 @@ { "name": "HORNETQ_TOPICS", "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -524,7 +604,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -626,4 +712,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-persistent-s2i.json index fdd368a5f..d2631580b 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-persistent-s2i.json @@ -6,13 +6,13 @@ "description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.", "iconClass": "icon-jboss", "tags": "eap,mysql,javaee,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-mysql-persistent-s2i" }, "labels": { "template": "eap64-mysql-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -77,25 +83,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -179,6 +185,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -268,7 +305,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -288,7 +325,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": 
"secure-${APPLICATION_NAME}" }, @@ -332,7 +369,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -421,8 +458,22 @@ "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -434,6 +485,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -499,20 +555,20 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" }, { "name": "HORNETQ_CLUSTER_PASSWORD", @@ -525,6 +581,30 @@ { "name": "HORNETQ_TOPICS", "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -533,7 +613,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -673,4 +759,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-s2i.json index ff6bdc112..ba6a32fec 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-s2i.json @@ -6,13 +6,13 @@ "description": "Application template for EAP 6 MySQL applications built using S2I.", "iconClass": "icon-jboss", "tags": "eap,mysql,javaee,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-mysql-s2i" }, "labels": { "template": "eap64-mysql-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -71,25 +77,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -173,6 +179,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -262,7 +299,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -282,7 +319,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -326,7 +363,7 @@ "from": { "kind": "ImageStreamTag", "namespace": 
"${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -415,8 +452,22 @@ "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -428,6 +479,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -493,20 +549,20 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" }, { "name": "HORNETQ_CLUSTER_PASSWORD", @@ -519,6 +575,30 @@ { "name": "HORNETQ_TOPICS", "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -527,7 +607,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -633,4 +719,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-persistent-s2i.json index 6443afdb0..670260769 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-persistent-s2i.json @@ -6,13 +6,13 @@ "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.", "iconClass": "icon-jboss", "tags": "eap,postgresql,javaee,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-postgresql-persistent-s2i" }, "labels": { "template": "eap64-postgresql-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -77,25 +83,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -164,6 +170,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -253,7 +290,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -273,7 +310,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": 
"${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -317,7 +354,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -406,8 +443,22 @@ "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -419,6 +470,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -484,20 +540,20 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" }, { "name": "HORNETQ_CLUSTER_PASSWORD", @@ -510,6 +566,30 @@ { "name": "HORNETQ_TOPICS", "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -518,7 +598,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -646,4 +732,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-s2i.json index e879e51cf..822731335 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-s2i.json @@ -6,13 +6,13 @@ "description": "Application template for EAP 6 PostgreSQL applications built using S2I.", "iconClass": "icon-jboss", "tags": "eap,postgresql,javaee,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "eap64-postgresql-s2i" }, "labels": { "template": "eap64-postgresql-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -71,25 +77,25 @@ }, { "description": "The name of the secret containing the keystore file", - "name": "EAP_HTTPS_SECRET", + "name": "HTTPS_SECRET", "value": "eap-app-secret", "required": false }, { "description": "The name of the keystore file within the secret", - "name": "EAP_HTTPS_KEYSTORE", + "name": "HTTPS_KEYSTORE", "value": "keystore.jks", "required": false }, { "description": "The name associated with the server certificate", - "name": "EAP_HTTPS_NAME", + "name": "HTTPS_NAME", "value": "", "required": false }, { "description": "The password for the keystore and certificate", - "name": "EAP_HTTPS_PASSWORD", + "name": "HTTPS_PASSWORD", "value": "", "required": false }, @@ -158,6 +164,37 @@ "name": "IMAGE_STREAM_NAMESPACE", "value": "openshift", "required": true + }, + { + "description": "The name of the secret containing the keystore file", + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "eap-app-secret", + "required": false + }, + { + "description": "The name of the keystore file within the secret", + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "jgroups.jceks", + "required": false + }, + { + "description": "The name associated with the server certificate", + "name": "JGROUPS_ENCRYPT_NAME", + "value": "", + "required": false + }, + { + "description": "The password for the keystore and certificate", + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "", + "required": false + }, + { + "description": "JGroups cluster password", + "name": "JGROUPS_CLUSTER_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true } ], "objects": [ @@ -247,7 +284,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -267,7 +304,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -311,7 +348,7 @@ "from": { 
"kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-eap64-openshift:1.1" + "name": "jboss-eap64-openshift:1.2" } } }, @@ -400,8 +437,22 @@ "name": "eap-keystore-volume", "mountPath": "/etc/eap-secret-volume", "readOnly": true + }, + { + "name": "eap-jgroups-keystore-volume", + "mountPath": "/etc/jgroups-encrypt-secret-volume", + "readOnly": true } ], + "livenessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "/opt/eap/bin/livenessProbe.sh" + ] + } + }, "readinessProbe": { "exec": { "command": [ @@ -413,6 +464,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -478,20 +534,20 @@ } }, { - "name": "EAP_HTTPS_KEYSTORE_DIR", + "name": "HTTPS_KEYSTORE_DIR", "value": "/etc/eap-secret-volume" }, { - "name": "EAP_HTTPS_KEYSTORE", - "value": "${EAP_HTTPS_KEYSTORE}" + "name": "HTTPS_KEYSTORE", + "value": "${HTTPS_KEYSTORE}" }, { - "name": "EAP_HTTPS_NAME", - "value": "${EAP_HTTPS_NAME}" + "name": "HTTPS_NAME", + "value": "${HTTPS_NAME}" }, { - "name": "EAP_HTTPS_PASSWORD", - "value": "${EAP_HTTPS_PASSWORD}" + "name": "HTTPS_PASSWORD", + "value": "${HTTPS_PASSWORD}" }, { "name": "HORNETQ_CLUSTER_PASSWORD", @@ -504,6 +560,30 @@ { "name": "HORNETQ_TOPICS", "value": "${HORNETQ_TOPICS}" + }, + { + "name": "JGROUPS_ENCRYPT_SECRET", + "value": "${JGROUPS_ENCRYPT_SECRET}" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR", + "value": "/etc/jgroups-encrypt-secret-volume" + }, + { + "name": "JGROUPS_ENCRYPT_KEYSTORE", + "value": "${JGROUPS_ENCRYPT_KEYSTORE}" + }, + { + "name": "JGROUPS_ENCRYPT_NAME", + "value": "${JGROUPS_ENCRYPT_NAME}" + }, + { + "name": "JGROUPS_ENCRYPT_PASSWORD", + "value": "${JGROUPS_ENCRYPT_PASSWORD}" + }, + { + "name": "JGROUPS_CLUSTER_PASSWORD", + "value": "${JGROUPS_CLUSTER_PASSWORD}" } ] } @@ -512,7 +592,13 @@ { "name": "eap-keystore-volume", "secret": { - "secretName": "${EAP_HTTPS_SECRET}" + "secretName": "${HTTPS_SECRET}" + } + }, + { + "name": "eap-jgroups-keystore-volume", + "secret": { + "secretName": "${JGROUPS_ENCRYPT_SECRET}" } } ] @@ -606,4 +692,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-basic-s2i.json index 729079130..376f2f61b 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-basic-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS applications built using S2I.", "tags": "tomcat,tomcat7,java,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat7-basic-s2i" }, "labels": { "template": "jws30-tomcat7-basic-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,8 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", "value": "", "required": false }, @@ -36,7 +36,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -119,7 +119,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -160,7 +160,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat7-openshift:1.1" + "name": "jboss-webserver30-tomcat7-openshift:1.2" } } }, @@ -254,6 +254,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-https-s2i.json index 7ce7e7fe2..e7bbd1154 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-https-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-https-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS applications built using S2I.", "tags": "tomcat,tomcat7,java,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat7-https-s2i" }, "labels": { "template": "jws30-tomcat7-https-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -167,7 +173,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -187,7 +193,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -231,7 +237,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat7-openshift:1.1" + "name": "jboss-webserver30-tomcat7-openshift:1.2" } } }, @@ -333,6 +339,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json index 9a08ec0b0..3a3ca8e24 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.", "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat7-mongodb-persistent-s2i" }, "labels": { "template": "jws30-tomcat7-mongodb-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -260,7 +266,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -280,7 +286,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -324,7 +330,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat7-openshift:1.1" + "name": "jboss-webserver30-tomcat7-openshift:1.2" } } }, @@ -426,6 +432,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -640,4 +651,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-s2i.json index b8dfb3ad3..b3fd295b9 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS MongoDB applications built using S2I.", "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat7-mongodb-s2i" }, "labels": { "template": "jws30-tomcat7-mongodb-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -254,7 +260,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -274,7 +280,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -318,7 +324,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat7-openshift:1.1" + "name": "jboss-webserver30-tomcat7-openshift:1.2" } } }, @@ -420,6 +426,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -600,4 +611,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json index d36e330d3..b0b2f5ec4 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS MySQL applications with persistent storage built using S2I.", "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat7-mysql-persistent-s2i" }, "labels": { "template": "jws30-tomcat7-mysql-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -46,9 +52,9 @@ "required": false }, { - "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS", "name": "DB_JNDI", - "value": "java:jboss/datasources/TodoListDS", + "value": "jboss/datasources/defaultDS", "required": false }, { @@ -263,7 +269,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -283,7 +289,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -327,7 +333,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat7-openshift:1.1" + "name": "jboss-webserver30-tomcat7-openshift:1.2" } } }, @@ -429,6 +435,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -642,4 +653,4 @@ } } ] -}
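
Note: the JWS datasource templates also move their `DB_JNDI` default from the quickstart-specific `java:jboss/datasources/TodoListDS` to the relative `jboss/datasources/defaultDS`, and the descriptions now show relative names too, presumably because the image binds the name under the `java:` namespace itself (an assumption, not stated in the templates). Overriding it at creation time:

```sh
# Point the webapp at a differently named pool; note the relative,
# prefix-free JNDI form used by the new defaults.
oc process -f jws30-tomcat7-mysql-persistent-s2i.json \
   -p APPLICATION_NAME=jws-app \
   -p DB_JNDI=jboss/datasources/mysqlDS \
 | oc create -f -
```
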
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-s2i.json index f5309db60..e48276f1b 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS MySQL applications built using S2I.", "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat7-mysql-s2i" }, "labels": { "template": "jws30-tomcat7-mysql-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -46,9 +52,9 @@ "required": false }, { - "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS", "name": "DB_JNDI", - "value": "java:jboss/datasources/TodoListDS", + "value": "jboss/datasources/defaultDS", "required": false }, { @@ -257,7 +263,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -277,7 +283,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -321,7 +327,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat7-openshift:1.1" + "name": "jboss-webserver30-tomcat7-openshift:1.2" } } }, @@ -423,6 +429,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -602,4 +613,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json index ee88a4c69..d7876d066 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.", "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat7-postgresql-persistent-s2i" }, "labels": { "template": "jws30-tomcat7-postgresql-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -46,9 +52,9 @@ "required": false }, { - "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS", "name": "DB_JNDI", - "value": "java:jboss/datasources/TodoListDS", + "value": "jboss/datasources/defaultDS", "required": false }, { @@ -248,7 +254,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -268,7 +274,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -312,7 +318,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat7-openshift:1.1" + "name": "jboss-webserver30-tomcat7-openshift:1.2" } } }, @@ -414,6 +420,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -615,4 +626,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-s2i.json index f5940a7a1..5c1bdf6d5 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS PostgreSQL applications built using S2I.", "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat7-postgresql-s2i" }, "labels": { "template": "jws30-tomcat7-postgresql-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -46,9 +52,9 @@ "required": false }, { - "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS", "name": "DB_JNDI", - "value": "java:jboss/datasources/TodoListDS", + "value": "jboss/datasources/defaultDS", "required": false }, { @@ -242,7 +248,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -262,7 +268,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -306,7 +312,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat7-openshift:1.1" + "name": "jboss-webserver30-tomcat7-openshift:1.2" } } }, @@ -408,6 +414,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -575,4 +586,4 @@ } } ] -}
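Each container in these templates now exposes port 8778 for the Jolokia agent alongside the existing http and https ports. A small probe, assuming the agent serves Jolokia's standard REST endpoint on that port; `pod_ip` is a placeholder:

```python
import json
import urllib2  # Python 2, matching openshift_facts.py elsewhere in this diff

def jolokia_agent_version(pod_ip, port=8778):
    # Query the Jolokia version endpoint; a JSON reply indicates the
    # agent is up and reachable on the newly exposed port.
    url = "http://{0}:{1}/jolokia/version".format(pod_ip, port)
    resp = urllib2.urlopen(url, timeout=5)
    return json.load(resp)["value"]["agent"]

# print(jolokia_agent_version("10.1.2.3"))
```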
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-basic-s2i.json index b24ce40ae..b425891c6 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-basic-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS applications built using S2I.", "tags": "tomcat,tomcat8,java,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat8-basic-s2i" }, "labels": { "template": "jws30-tomcat8-basic-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,8 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", "value": "", "required": false }, @@ -36,7 +36,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -119,7 +119,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -160,7 +160,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat8-openshift:1.1" + "name": "jboss-webserver30-tomcat8-openshift:1.2" } } }, @@ -254,6 +254,11 @@ }, "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-https-s2i.json index 7e788d0db..a20518cbc 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-https-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-https-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS applications built using S2I.", "tags": "tomcat,tomcat8,java,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat8-https-s2i" }, "labels": { "template": "jws30-tomcat8-https-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. 
Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -167,7 +173,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -187,7 +193,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -231,7 +237,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat8-openshift:1.1" + "name": "jboss-webserver30-tomcat8-openshift:1.2" } } }, @@ -333,6 +339,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json index 2f1d69c75..46b99593d 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.", "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat8-mongodb-persistent-s2i" }, "labels": { "template": "jws30-tomcat8-mongodb-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -260,7 +266,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -280,7 +286,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -324,7 +330,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat8-openshift:1.1" + "name": "jboss-webserver30-tomcat8-openshift:1.2" } } }, @@ -426,6 +432,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -640,4 +651,4 @@ } } ] -}
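`SOURCE_REPOSITORY_REF` now defaults to the `1.2` branch/tag of the quickstart repository. Before processing a template it can be worth confirming that the ref actually exists upstream; a sketch using `git ls-remote` (the repository URL below is a placeholder):

```python
import subprocess

def ref_exists(repo_url, ref):
    # git ls-remote prints matching refs; empty output means the
    # branch/tag is absent upstream.
    out = subprocess.check_output(
        ["git", "ls-remote", repo_url,
         "refs/heads/{0}".format(ref), "refs/tags/{0}".format(ref)])
    return bool(out.strip())

# ref_exists("https://github.com/jboss-openshift/openshift-quickstarts", "1.2")
```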
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-s2i.json index bad676f2e..c01b6888a 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS MongoDB applications built using S2I.", "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat8-mongodb-s2i" }, "labels": { "template": "jws30-tomcat8-mongodb-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -254,7 +260,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -274,7 +280,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -318,7 +324,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat8-openshift:1.1" + "name": "jboss-webserver30-tomcat8-openshift:1.2" } } }, @@ -420,6 +426,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -600,4 +611,4 @@ } } ] -}
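Instantiating one of the updated templates then reduces to supplying the new parameters at processing time. A hedged usage sketch, assuming an `oc` client that accepts `-p KEY=VALUE` for template parameters (older clients used `-v`); the template and parameter values are illustrative:

```python
import subprocess

def process_stored_template(name, params, namespace="openshift"):
    # Render a stored template to a JSON manifest; pipe the result to
    # `oc create -f -`, or inspect it, as a separate step.
    cmd = ["oc", "-n", namespace, "process", name]
    for key, value in params.items():
        cmd += ["-p", "{0}={1}".format(key, value)]
    return subprocess.check_output(cmd)

# manifest = process_stored_template("jws30-tomcat8-mongodb-s2i",
#                                    {"APPLICATION_NAME": "jws-app",
#                                     "HOSTNAME_HTTP": "jws-app.example.com"})
```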
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json index e20a45982..ebe1dc6af 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS MySQL applications with persistent storage built using S2I.", "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat8-mysql-persistent-s2i" }, "labels": { "template": "jws30-tomcat8-mysql-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -46,9 +52,9 @@ "required": false }, { - "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS", "name": "DB_JNDI", - "value": "java:jboss/datasources/TodoListDS", + "value": "jboss/datasources/defaultDS", "required": false }, { @@ -263,7 +269,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -283,7 +289,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -327,7 +333,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat8-openshift:1.1" + "name": "jboss-webserver30-tomcat8-openshift:1.2" } } }, @@ -429,6 +435,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -642,4 +653,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-s2i.json index 1b9624756..fe068842a 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS MySQL applications built using S2I.", "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat8-mysql-s2i" }, "labels": { "template": "jws30-tomcat8-mysql-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -46,9 +52,9 @@ "required": false }, { - "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS", "name": "DB_JNDI", - "value": "java:jboss/datasources/TodoListDS", + "value": "jboss/datasources/defaultDS", "required": false }, { @@ -257,7 +263,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -277,7 +283,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -321,7 +327,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat8-openshift:1.1" + "name": "jboss-webserver30-tomcat8-openshift:1.2" } } }, @@ -423,6 +429,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -602,4 +613,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json index dc492a38e..302a55315 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.", "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat8-postgresql-persistent-s2i" }, "labels": { "template": "jws30-tomcat8-postgresql-persistent-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -46,9 +52,9 @@ "required": false }, { - "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS", "name": "DB_JNDI", - "value": "java:jboss/datasources/TodoListDS", + "value": "jboss/datasources/defaultDS", "required": false }, { @@ -248,7 +254,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -268,7 +274,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -312,7 +318,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat8-openshift:1.1" + "name": "jboss-webserver30-tomcat8-openshift:1.2" } } }, @@ -414,6 +420,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -615,4 +626,4 @@ } } ] -}
\ No newline at end of file +} diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-s2i.json index 242b37a79..af2415905 100644 --- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-s2i.json @@ -6,13 +6,13 @@ "iconClass": "icon-tomcat", "description": "Application template for JWS PostgreSQL applications built using S2I.", "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas", - "version": "1.1.0" + "version": "1.2.0" }, "name": "jws30-tomcat8-postgresql-s2i" }, "labels": { "template": "jws30-tomcat8-postgresql-s2i", - "xpaas": "1.1.0" + "xpaas": "1.2.0" }, "parameters": [ { @@ -22,8 +22,14 @@ "required": true }, { - "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_DOMAIN", + "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTP", + "value": "", + "required": false + }, + { + "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>", + "name": "HOSTNAME_HTTPS", "value": "", "required": false }, @@ -36,7 +42,7 @@ { "description": "Git branch/tag reference", "name": "SOURCE_REPOSITORY_REF", - "value": "1.1", + "value": "1.2", "required": false }, { @@ -46,9 +52,9 @@ "required": false }, { - "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", + "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS", "name": "DB_JNDI", - "value": "java:jboss/datasources/TodoListDS", + "value": "jboss/datasources/defaultDS", "required": false }, { @@ -242,7 +248,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTP}", "to": { "name": "${APPLICATION_NAME}" } @@ -262,7 +268,7 @@ } }, "spec": { - "host": "${APPLICATION_DOMAIN}", + "host": "${HOSTNAME_HTTPS}", "to": { "name": "secure-${APPLICATION_NAME}" }, @@ -306,7 +312,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${IMAGE_STREAM_NAMESPACE}", - "name": "jboss-webserver30-tomcat8-openshift:1.1" + "name": "jboss-webserver30-tomcat8-openshift:1.2" } } }, @@ -407,6 +413,11 @@ ], "ports": [ { + "name": "jolokia", + "containerPort": 8778, + "protocol": "TCP" + }, + { "name": "http", "containerPort": 8080, "protocol": "TCP" @@ -573,4 +584,4 @@ } } ] -}
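Every template in this series is expected to move in lockstep: the `xpaas` label to 1.2.0 and the image stream tags to `:1.2`. A quick consistency sweep over the example directory can catch a file that was missed; the glob path follows this repository's layout:

```python
import glob

def stale_templates(pattern="roles/openshift_examples/files/examples/"
                            "v1.1/xpaas-templates/jws30-*.json"):
    # Flag templates whose xpaas label was bumped to 1.2.0 but which
    # still reference a :1.1 image stream tag somewhere.
    stale = []
    for path in glob.glob(pattern):
        with open(path) as f:
            text = f.read()
        if '"xpaas": "1.2.0"' in text and 'openshift:1.1"' in text:
            stale.append(path)
    return stale

# print(stale_templates())
```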
\ No newline at end of file +} diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml index 42e7903fd..cdd813e6a 100644 --- a/roles/openshift_expand_partition/tasks/main.yml +++ b/roles/openshift_expand_partition/tasks/main.yml @@ -1,11 +1,14 @@ --- - name: Ensure growpart is installed - yum: pkg=cloud-utils-growpart state=present - when: ansible_pkg_mgr == "yum" + action: "{{ ansible_pkg_mgr }} name=cloud-utils-growpart state=present" + when: not openshift.common.is_containerized | bool -- name: Ensure growpart is installed - dnf: pkg=cloud-utils-growpart state=present - when: ansible_pkg_mgr == "dnf" +- name: Determine if growpart is installed + command: "rpm -q cloud-utils-growpart" + register: has_growpart + failed_when: "has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout" + changed_when: false + when: openshift.common.is_containerized | bool - name: Grow the partitions command: "growpart {{oep_drive}} {{oep_partition}}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 3f8f6b5af..40e54d706 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -188,9 +188,6 @@ def normalize_gce_facts(metadata, facts): _, _, zone = metadata['instance']['zone'].rpartition('/') facts['zone'] = zone - # Default to no sdn for GCE deployments - facts['use_openshift_sdn'] = False - # GCE currently only supports a single interface facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0] pub_ip = facts['network']['interfaces'][0]['public_ips'][0] @@ -478,52 +475,68 @@ def set_url_facts_if_unset(facts): were not already present """ if 'master' in facts: - api_use_ssl = facts['master']['api_use_ssl'] - api_port = facts['master']['api_port'] - console_use_ssl = facts['master']['console_use_ssl'] - console_port = facts['master']['console_port'] - console_path = facts['master']['console_path'] - etcd_use_ssl = facts['master']['etcd_use_ssl'] - etcd_hosts = facts['master']['etcd_hosts'] - etcd_port = facts['master']['etcd_port'] hostname = facts['common']['hostname'] - public_hostname = facts['common']['public_hostname'] cluster_hostname = facts['master'].get('cluster_hostname') cluster_public_hostname = facts['master'].get('cluster_public_hostname') + public_hostname = facts['common']['public_hostname'] + api_hostname = cluster_hostname if cluster_hostname else hostname + api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname + console_path = facts['master']['console_path'] + etcd_hosts = facts['master']['etcd_hosts'] + + use_ssl = dict( + api=facts['master']['api_use_ssl'], + public_api=facts['master']['api_use_ssl'], + loopback_api=facts['master']['api_use_ssl'], + console=facts['master']['console_use_ssl'], + public_console=facts['master']['console_use_ssl'], + etcd=facts['master']['etcd_use_ssl'] + ) + + ports = dict( + api=facts['master']['api_port'], + public_api=facts['master']['api_port'], + loopback_api=facts['master']['api_port'], + console=facts['master']['console_port'], + public_console=facts['master']['console_port'], + etcd=facts['master']['etcd_port'], + ) + + etcd_urls = [] + if etcd_hosts != '': + facts['master']['etcd_port'] = ports['etcd'] + facts['master']['embedded_etcd'] = False + for host in etcd_hosts: + etcd_urls.append(format_url(use_ssl['etcd'], host, + ports['etcd'])) + else: + etcd_urls = 
[format_url(use_ssl['etcd'], hostname, + ports['etcd'])] + + facts['master'].setdefault('etcd_urls', etcd_urls) + + prefix_hosts = [('api', api_hostname), + ('public_api', api_public_hostname), + ('loopback_api', hostname)] + + for prefix, host in prefix_hosts: + facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix], + host, + ports[prefix])) + + + r_lhn = "{0}:{1}".format(api_hostname, ports['api']).replace('.', '-') + facts['master'].setdefault('loopback_cluster_name', r_lhn) + facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn)) + facts['master'].setdefault('loopback_user', "system:openshift-master/{0}".format(r_lhn)) + + prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)] + for prefix, host in prefix_hosts: + facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix], + host, + ports[prefix], + console_path)) - if 'etcd_urls' not in facts['master']: - etcd_urls = [] - if etcd_hosts != '': - facts['master']['etcd_port'] = etcd_port - facts['master']['embedded_etcd'] = False - for host in etcd_hosts: - etcd_urls.append(format_url(etcd_use_ssl, host, - etcd_port)) - else: - etcd_urls = [format_url(etcd_use_ssl, hostname, - etcd_port)] - facts['master']['etcd_urls'] = etcd_urls - if 'api_url' not in facts['master']: - api_hostname = cluster_hostname if cluster_hostname else hostname - facts['master']['api_url'] = format_url(api_use_ssl, api_hostname, - api_port) - if 'public_api_url' not in facts['master']: - api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname - facts['master']['public_api_url'] = format_url(api_use_ssl, - api_public_hostname, - api_port) - if 'console_url' not in facts['master']: - console_hostname = cluster_hostname if cluster_hostname else hostname - facts['master']['console_url'] = format_url(console_use_ssl, - console_hostname, - console_port, - console_path) - if 'public_console_url' not in facts['master']: - console_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname - facts['master']['public_console_url'] = format_url(console_use_ssl, - console_public_hostname, - console_port, - console_path) return facts def set_aggregate_facts(facts): @@ -645,7 +658,7 @@ def set_deployment_facts_if_unset(facts): facts['common']['service_type'] = service_type if 'config_base' not in facts['common']: config_base = '/etc/origin' - if deployment_type in ['enterprise', 'online']: + if deployment_type in ['enterprise']: config_base = '/etc/openshift' # Handle upgrade scenarios when symlinks don't yet exist: if not os.path.exists(config_base) and os.path.exists('/etc/openshift'): @@ -653,13 +666,26 @@ def set_deployment_facts_if_unset(facts): facts['common']['config_base'] = config_base if 'data_dir' not in facts['common']: data_dir = '/var/lib/origin' - if deployment_type in ['enterprise', 'online']: + if deployment_type in ['enterprise']: data_dir = '/var/lib/openshift' # Handle upgrade scenarios when symlinks don't yet exist: if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'): data_dir = '/var/lib/openshift' facts['common']['data_dir'] = data_dir + # remove duplicate and empty strings from registry lists + for cat in ['additional', 'blocked', 'insecure']: + key = 'docker_{0}_registries'.format(cat) + if key in facts['common']: + facts['common'][key] = list(set(facts['common'][key]) - set([''])) + + + if deployment_type in ['enterprise', 'atomic-enterprise', 
'openshift-enterprise']: + addtl_regs = facts['common'].get('docker_additional_registries', []) + ent_reg = 'registry.access.redhat.com' + if ent_reg not in addtl_regs: + facts['common']['docker_additional_registries'] = addtl_regs + [ent_reg] + for role in ('master', 'node'): if role in facts: deployment_type = facts['common']['deployment_type'] @@ -707,11 +733,36 @@ def set_version_facts_if_unset(facts): if version is not None: if deployment_type == 'origin': version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6') + version_gt_3_1_1_or_1_1_1 = LooseVersion(version) > LooseVersion('1.1.1') else: version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900') + version_gt_3_1_1_or_1_1_1 = LooseVersion(version) > LooseVersion('3.1.1') else: version_gt_3_1_or_1_1 = True + version_gt_3_1_1_or_1_1_1 = True facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1 + facts['common']['version_greater_than_3_1_1_or_1_1_1'] = version_gt_3_1_1_or_1_1_1 + + return facts + +def set_manageiq_facts_if_unset(facts): + """ Set manageiq facts. This currently includes common.use_manageiq. + + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with version facts. + Raises: + OpenShiftFactsInternalError: + """ + if 'common' not in facts: + if 'version_greater_than_3_1_or_1_1' not in facts['common']: + raise OpenShiftFactsInternalError( + "Invalid invocation: The required facts are not set" + ) + if 'use_manageiq' not in facts['common']: + facts['common']['use_manageiq'] = facts['common']['version_greater_than_3_1_or_1_1'] + return facts def set_sdn_facts_if_unset(facts, system_facts): @@ -727,7 +778,8 @@ def set_sdn_facts_if_unset(facts, system_facts): if 'common' in facts: use_sdn = facts['common']['use_openshift_sdn'] if not (use_sdn == '' or isinstance(use_sdn, bool)): - facts['common']['use_openshift_sdn'] = bool(strtobool(str(use_sdn))) + use_sdn = bool(strtobool(str(use_sdn))) + facts['common']['use_openshift_sdn'] = use_sdn if 'sdn_network_plugin_name' not in facts['common']: plugin = 'redhat/openshift-ovs-subnet' if use_sdn else '' facts['common']['sdn_network_plugin_name'] = plugin @@ -862,10 +914,6 @@ def apply_provider_facts(facts, provider_facts): if not provider_facts: return facts - use_openshift_sdn = provider_facts.get('use_openshift_sdn') - if isinstance(use_openshift_sdn, bool): - facts['common']['use_openshift_sdn'] = use_openshift_sdn - common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')] for h_var, ip_var in common_vars: ip_value = provider_facts['network'].get(ip_var) @@ -936,6 +984,7 @@ def save_local_facts(filename, facts): os.makedirs(fact_dir) with open(filename, 'w') as fact_file: fact_file.write(module.jsonify(facts)) + os.chmod(filename, 0o600) except (IOError, OSError) as ex: raise OpenShiftFactsFileWriteError( "Could not create fact file: %s, error: %s" % (filename, ex) @@ -971,6 +1020,62 @@ def get_local_facts_from_file(filename): return local_facts +def set_container_facts_if_unset(facts): + """ Set containerized facts. 
+ + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with the generated containerization + facts + """ + deployment_type = facts['common']['deployment_type'] + if deployment_type in ['enterprise', 'openshift-enterprise']: + master_image = 'openshift3/ose' + cli_image = master_image + node_image = 'openshift3/node' + ovs_image = 'openshift3/openvswitch' + etcd_image = 'registry.access.redhat.com/rhel7/etcd' + elif deployment_type == 'atomic-enterprise': + master_image = 'aep3_beta/aep' + cli_image = master_image + node_image = 'aep3_beta/node' + ovs_image = 'aep3_beta/openvswitch' + etcd_image = 'registry.access.redhat.com/rhel7/etcd' + else: + master_image = 'openshift/origin' + cli_image = master_image + node_image = 'openshift/node' + ovs_image = 'openshift/openvswitch' + etcd_image = 'registry.access.redhat.com/rhel7/etcd' + + facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') + if 'is_containerized' not in facts['common']: + facts['common']['is_containerized'] = facts['common']['is_atomic'] + if 'cli_image' not in facts['common']: + facts['common']['cli_image'] = cli_image + if 'etcd' in facts and 'etcd_image' not in facts['etcd']: + facts['etcd']['etcd_image'] = etcd_image + if 'master' in facts and 'master_image' not in facts['master']: + facts['master']['master_image'] = master_image + if 'node' in facts: + if 'node_image' not in facts['node']: + facts['node']['node_image'] = node_image + if 'ovs_image' not in facts['node']: + facts['node']['ovs_image'] = ovs_image + + if facts['common']['is_containerized']: + facts['common']['admin_binary'] = '/usr/local/bin/oadm' + facts['common']['client_binary'] = '/usr/local/bin/oc' + + return facts + + +class OpenShiftFactsInternalError(Exception): + """Origin Facts Error""" + pass + + class OpenShiftFactsUnsupportedRoleError(Exception): """Origin Facts Unsupported Role Error""" pass @@ -993,6 +1098,7 @@ class OpenShiftFacts(object): facts (dict): facts for the host Args: + module (AnsibleModule): an AnsibleModule object role (str): role for setting local facts filename (str): local facts file to use local_facts (dict): local facts to set @@ -1002,7 +1108,7 @@ class OpenShiftFacts(object): Raises: OpenShiftFactsUnsupportedRoleError: """ - known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd'] + known_roles = ['common', 'master', 'node', 'etcd', 'nfs'] def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False): self.changed = False @@ -1047,8 +1153,10 @@ class OpenShiftFacts(object): facts = set_sdn_facts_if_unset(facts, self.system_facts) facts = set_deployment_facts_if_unset(facts) facts = set_version_facts_if_unset(facts) + facts = set_manageiq_facts_if_unset(facts) facts = set_aggregate_facts(facts) facts = set_etcd_facts_if_unset(facts) + facts = set_container_facts_if_unset(facts) return dict(openshift=facts) def get_defaults(self, roles): @@ -1071,14 +1179,15 @@ class OpenShiftFacts(object): common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr, deployment_type='origin', hostname=hostname, - public_hostname=hostname, use_manageiq=False) - common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc' - common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm' + public_hostname=hostname) + common['client_binary'] = 'oc' + common['admin_binary'] = 'oadm' common['dns_domain'] = 'cluster.local' + common['install_examples'] = True defaults['common'] = common if 'master' in roles: - master = 
dict(api_use_ssl=True, api_port='8443', + master = dict(api_use_ssl=True, api_port='8443', controllers_port='8444', console_use_ssl=True, console_path='/console', console_port='8443', etcd_use_ssl=True, etcd_hosts='', etcd_port='4001', portal_net='172.30.0.0/16', @@ -1095,6 +1204,12 @@ class OpenShiftFacts(object): node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16', iptables_sync_period='5s', set_node_ip=False) defaults['node'] = node + + if 'nfs' in roles: + nfs = dict(exports_dir='/var/export', registry_volume='regvol', + export_options='*(rw,sync,all_squash)') + defaults['nfs'] = nfs + return defaults def guess_host_provider(self): @@ -1206,14 +1321,78 @@ class OpenShiftFacts(object): del facts[key] if new_local_facts != local_facts: + self.validate_local_facts(new_local_facts) changed = True - if not module.check_mode: save_local_facts(self.filename, new_local_facts) self.changed = changed return new_local_facts + def validate_local_facts(self, facts=None): + """ Validate local facts + + Args: + facts (dict): local facts to validate + """ + invalid_facts = dict() + invalid_facts = self.validate_master_facts(facts, invalid_facts) + if invalid_facts: + msg = 'Invalid facts detected:\n' + for key in invalid_facts.keys(): + msg += '{0}: {1}\n'.format(key, invalid_facts[key]) + module.fail_json(msg=msg, + changed=self.changed) + + # disabling pylint errors for line-too-long since we're dealing + # with best effort reduction of error messages here. + # disabling errors for too-many-branches since we require checking + # many conditions. + # pylint: disable=line-too-long, too-many-branches + @staticmethod + def validate_master_facts(facts, invalid_facts): + """ Validate master facts + + Args: + facts (dict): local facts to validate + invalid_facts (dict): collected invalid_facts + + Returns: + dict: Invalid facts + """ + if 'master' in facts: + # openshift.master.session_auth_secrets + if 'session_auth_secrets' in facts['master']: + session_auth_secrets = facts['master']['session_auth_secrets'] + if not issubclass(type(session_auth_secrets), list): + invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.' + elif 'session_encryption_secrets' not in facts['master']: + invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set ' + 'if openshift_master_session_auth_secrets is provided.') + elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']): + invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and ' + 'openshift_master_session_encryption_secrets must be ' + 'equal length.') + else: + for secret in session_auth_secrets: + if len(secret) < 32: + invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. ' + 'Secrets must be at least 32 characters in length.') + # openshift.master.session_encryption_secrets + if 'session_encryption_secrets' in facts['master']: + session_encryption_secrets = facts['master']['session_encryption_secrets'] + if not issubclass(type(session_encryption_secrets), list): + invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.' 
+ elif 'session_auth_secrets' not in facts['master']: + invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be ' + 'set if openshift_master_session_encryption_secrets ' + 'is provided.') + else: + for secret in session_encryption_secrets: + if len(secret) not in [16, 24, 32]: + invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. ' + 'Secrets must be 16, 24, or 32 characters in length.') + return invalid_facts def main(): """ main """ diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 2e889d7d5..e40a1b329 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -5,18 +5,26 @@ - ansible_version | version_compare('1.8.0', 'ge') - ansible_version | version_compare('1.9.0', 'ne') - ansible_version | version_compare('1.9.0.1', 'ne') + +- name: Detecting Operating System + shell: ls /run/ostree-booted + ignore_errors: yes + failed_when: false + changed_when: false + register: ostree_output -- name: Ensure PyYaml is installed - yum: pkg={{ item }} state=installed - when: ansible_pkg_mgr == "yum" - with_items: - - PyYAML +# Locally setup containerized facts for now +- set_fact: + l_is_atomic: "{{ ostree_output.rc == 0 }}" +- set_fact: + l_is_containerized: "{{ l_is_atomic or containerized | default(false) | bool }}" - name: Ensure PyYaml is installed - dnf: pkg={{ item }} state=installed - when: ansible_pkg_mgr == "dnf" - with_items: - - PyYAML + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" + when: not l_is_atomic | bool -- name: Gather Cluster facts +- name: Gather Cluster facts and set is_containerized if needed openshift_facts: + role: common + local_facts: + is_containerized: "{{ containerized | default(None) }}" diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 637e494ea..06f12053a 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -5,6 +5,7 @@ until: omd_get_node.rc == 0 retries: 20 delay: 5 + changed_when: false with_items: openshift_nodes - name: Set node schedulability diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index 2d3187e21..0357fc85a 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -1,4 +1,8 @@ --- +- fail: + msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1." 
+ when: not openshift.common.version_greater_than_3_1_or_1_1 | bool + - name: Copy Configuration to temporary conf command: > cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}} diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 9766d01ae..1f74d851a 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -6,7 +6,9 @@ os_firewall_allow: - service: etcd embedded port: 4001/tcp - service: api server https - port: 8443/tcp + port: "{{ openshift.master.api_port }}/tcp" +- service: api controllers https + port: "{{ openshift.master.controllers_port }}/tcp" - service: dns tcp port: 53/tcp - service: dns udp @@ -24,7 +26,5 @@ os_firewall_allow: os_firewall_deny: - service: api server http port: 8080/tcp -- service: former web console port - port: 8444/tcp - service: former etcd peer port port: 7001/tcp diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 4b9500cbd..6b9992eea 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,14 +1,25 @@ --- - name: restart master service: name={{ openshift.common.service_type }}-master state=restarted - when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false)) + when: (not openshift_master_ha | bool) and (not (master_service_status_changed | default(false) | bool)) + notify: Verify API Server - name: restart master api service: name={{ openshift.common.service_type }}-master-api state=restarted - when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' + when: (openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + notify: Verify API Server -# TODO: need to fix up ignore_errors here - name: restart master controllers service: name={{ openshift.common.service_type }}-master-controllers state=restarted - when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' - ignore_errors: yes + when: (openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + +- name: Verify API Server + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. 
+ command: > + curl -k --head --silent {{ openshift.master.api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index c125cb5d0..8db99fc2a 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - { role: openshift_common } +- { role: openshift_cli } diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 011b5dedd..57b50bee4 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -9,7 +9,6 @@ Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }} when: openshift_master_oauth_grant_method is defined and openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods - # HA Variable Validation - fail: msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations" @@ -20,6 +19,9 @@ - fail: msg: "openshift_master_cluster_password must be set for multi-master installations" when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password) +- fail: + msg: "Pacemaker based HA is not supported at this time when used with containerized installs" + when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and openshift.common.is_containerized | bool - name: Set master facts openshift_facts: @@ -38,7 +40,14 @@ console_url: "{{ openshift_master_console_url | default(None) }}" console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" - etcd_hosts: "{{ openshift_master_etcd_hosts | default(None)}}" + logging_public_url: "{{ openshift_master_logging_public_url | default(None) }}" + metrics_public_url: "{{ openshift_master_metrics_public_url | default(None) }}" + logout_url: "{{ openshift_master_logout_url | default(None) }}" + extension_scripts: "{{ openshift_master_extension_scripts | default(None) }}" + extension_stylesheets: "{{ openshift_master_extension_stylesheets | default(None) }}" + extensions: "{{ openshift_master_extensions | default(None) }}" + oauth_template: "{{ openshift_master_oauth_template | default(None) }}" + etcd_hosts: "{{ openshift_master_etcd_hosts | default(None) }}" etcd_port: "{{ openshift_master_etcd_port | default(None) }}" etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}" @@ -47,12 +56,13 @@ embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}" dns_port: "{{ openshift_master_dns_port | default(None) }}" bind_addr: "{{ openshift_master_bind_addr | default(None) }}" + pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}" portal_net: "{{ openshift_master_portal_net | default(None) }}" session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}" session_name: "{{ openshift_master_session_name | default(None) }}" + session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}" session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(None) }}" session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(None) 
}}" - session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}" access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}" auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}" identity_providers: "{{ openshift_master_identity_providers | default(None) }}" @@ -76,16 +86,36 @@ disabled_features: "{{ osm_disabled_features | default(None) }}" master_count: "{{ openshift_master_count | default(None) }}" controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}" + master_image: "{{ osm_image | default(None) }}" - name: Install Master package - yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present - when: ansible_pkg_mgr == "yum" - register: install_result + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=present" + when: not openshift.common.is_containerized | bool -- name: Install Master package - dnf: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present - when: ansible_pkg_mgr == "dnf" +- name: Pull master image + command: > + docker pull {{ openshift.master.master_image }} + when: openshift.common.is_containerized | bool + +- name: Install Master docker service file + template: + dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service" + src: docker/master.docker.service.j2 register: install_result + when: openshift.common.is_containerized | bool and not openshift_master_ha | bool + +- name: Create openshift.common.data_dir + file: + path: "{{ openshift.common.data_dir }}" + state: directory + mode: 0755 + owner: root + group: root + when: openshift.common.is_containerized | bool + +- name: Reload systemd units + command: systemctl daemon-reload + when: openshift.common.is_containerized | bool and install_result | changed - name: Re-gather package dependent master facts openshift_facts: @@ -117,13 +147,9 @@ - restart master controllers - name: Install httpd-tools if needed - yum: pkg=httpd-tools state=present - when: (ansible_pkg_mgr == "yum") and (item.kind == 'HTPasswdPasswordIdentityProvider') - with_items: openshift.master.identity_providers - -- name: Install httpd-tools if needed - dnf: pkg=httpd-tools state=present - when: (ansible_pkg_mgr == "dnf") and (item.kind == 'HTPasswdPasswordIdentityProvider') + action: "{{ ansible_pkg_mgr }} name=httpd-tools state=present" + when: (item.kind == 'HTPasswdPasswordIdentityProvider') and + not openshift.common.is_atomic | bool with_items: openshift.master.identity_providers - name: Ensure htpasswd directory exists @@ -142,40 +168,60 @@ when: item.kind == 'HTPasswdPasswordIdentityProvider' with_items: openshift.master.identity_providers +- name: Init HA Service Info + set_fact: + ha_suffix: "" + ha_svcdir: "/usr/lib/systemd/system" + +- name: Set HA Service Info for containerized installs + set_fact: + ha_suffix: ".docker" + ha_svcdir: "/etc/systemd/system" + when: openshift.common.is_containerized | bool + # workaround for missing systemd unit files for controllers/api -- name: Create the api service file - template: - src: atomic-openshift-master-api.service.j2 - dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service - force: no - when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -- name: Create the controllers service file +- name: Create the systemd unit files template: - src: atomic-openshift-master-controllers.service.j2 - dest: 
/usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service - force: no + src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2" + dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-{{ item }}.service" when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -- name: Create the api env file + with_items: + - api + - controllers + register: create_unit_files + +- command: systemctl daemon-reload + when: create_unit_files | changed +# end workaround for missing systemd unit files + +- name: Create the master api service env file template: - src: atomic-openshift-master-api.j2 + src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2" dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api - force: no when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -- name: Create the controllers env file + notify: + - restart master api + +- name: Create the master controllers service env file template: - src: atomic-openshift-master-controllers.j2 + src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2" dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers - force: no when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -- command: systemctl daemon-reload - when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -# end workaround for missing systemd unit files + notify: + - restart master controllers + +- name: Create the master service env file + template: + src: "atomic-openshift-master.j2" + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master + notify: + - restart master - name: Create session secrets file template: dest: "{{ openshift.master.session_secrets_file }}" src: sessionSecretsFile.yaml.v1.j2 - force: no + when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined notify: - restart master - restart master api @@ -194,55 +240,49 @@ - restart master api - restart master controllers -- name: Configure master settings - lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master - regexp: "{{ item.regex }}" - line: "{{ item.line }}" - create: yes - with_items: - - regex: '^OPTIONS=' - line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}" - - regex: '^CONFIG_FILE=' - line: "CONFIG_FILE={{ openshift_master_config_file }}" - notify: - - restart master - -- name: Configure master api settings - lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api - regexp: "{{ item.regex }}" - line: "{{ item.line }}" - with_items: - - regex: '^OPTIONS=' - line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443" - - regex: '^CONFIG_FILE=' - line: "CONFIG_FILE={{ openshift_master_config_file }}" - when: openshift_master_ha | bool and openshift_master_cluster_method == "native" - notify: - - restart master api - -- name: Configure master controller settings - lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers - regexp: "{{ item.regex }}" - line: "{{ item.line }}" - with_items: - - regex: '^OPTIONS=' - line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444" - - regex: '^CONFIG_FILE=' - line: "CONFIG_FILE={{ openshift_master_config_file }}" - when: openshift_master_ha | bool and openshift_master_cluster_method == 
"native" - notify: - - restart master controllers +- name: Test local loopback context + command: > + {{ openshift.common.client_binary }} config view + --config={{ openshift_master_loopback_config }} + changed_when: false + register: loopback_config + +- command: > + {{ openshift.common.client_binary }} config set-cluster + --certificate-authority={{ openshift_master_config_dir }}/ca.crt + --embed-certs=true --server={{ openshift.master.loopback_api_url }} + {{ openshift.master.loopback_cluster_name }} + --config={{ openshift_master_loopback_config }} + when: loopback_context_string not in loopback_config.stdout + register: set_loopback_cluster + +- command: > + {{ openshift.common.client_binary }} config set-context + --cluster={{ openshift.master.loopback_cluster_name }} + --namespace=default --user={{ openshift.master.loopback_user }} + {{ openshift.master.loopback_context_name }} + --config={{ openshift_master_loopback_config }} + when: set_loopback_cluster | changed + register: set_loopback_context + +- command: > + {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }} + --config={{ openshift_master_loopback_config }} + when: set_loopback_context | changed + register: set_current_context - name: Start and enable master service: name={{ openshift.common.service_type }}-master enabled=yes state=started when: not openshift_master_ha | bool register: start_result + notify: Verify API Server + +- name: Stop and disable non HA master when running HA + service: name={{ openshift.common.service_type }}-master enabled=no state=stopped + when: openshift_master_ha | bool - set_fact: - master_service_status_changed = start_result | changed + master_service_status_changed: "{{ start_result | changed }}" when: not openshift_master_ha | bool - name: Start and enable master api @@ -251,29 +291,42 @@ register: start_result - set_fact: - master_api_service_status_changed = start_result | changed + master_api_service_status_changed: "{{ start_result | changed }}" when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' -# TODO: fix the ugly workaround of setting ignore_errors -# the controllers service tries to start even if it is already started +# A separate wait is required here for native HA since notifies will +# be resolved after all tasks in the role. +- name: Wait for API to become available + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. 
+ command: > + curl -k --head --silent {{ openshift.master.api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false + when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool + - name: Start and enable master controller service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' register: start_result - ignore_errors: yes - set_fact: - master_controllers_service_status_changed = start_result | changed + master_controllers_service_status_changed: "{{ start_result | changed }}" when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' - name: Install cluster packages - action: "{{ansible_pkg_mgr}} pkg=pcs state=present" + action: "{{ ansible_pkg_mgr }} name=pcs state=present" when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' + and not openshift.common.is_containerized | bool register: install_result - name: Start and enable cluster service service: name=pcsd enabled=yes state=started when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' + and not openshift.common.is_containerized | bool - name: Set the cluster user password shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster @@ -281,6 +334,7 @@ - name: Lookup default group for ansible_ssh_user command: "/usr/bin/id -g {{ ansible_ssh_user }}" + changed_when: false register: _ansible_ssh_user_gid - name: Create the client config dir(s) diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index 205934248..81bae5470 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -1,5 +1,5 @@ -OPTIONS= -CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml +OPTIONS=--loglevel={{ openshift.master.debug_level }} +CONFIG_FILE={{ openshift_master_config_file }} # Proxy configuration # Origin uses standard HTTP_PROXY environment variables. Be sure to set diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 new file mode 120000 index 000000000..4bb7095ee --- /dev/null +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 @@ -0,0 +1 @@ +../native-cluster/atomic-openshift-master-api.j2
\ No newline at end of file diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 new file mode 100644 index 000000000..a935b82f6 --- /dev/null +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -0,0 +1,26 @@ +[Unit] +Description=Atomic OpenShift Master API +Documentation=https://github.com/openshift/origin +After=network.target +After=etcd.service +Before={{ openshift.common.service_type }}-node.service +Requires=network.target +Requires=docker.service +PartOf=docker.service + +[Service] +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api +Environment=GOTRACEBACK=crash +ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }} start master api --config=${CONFIG_FILE} $OPTIONS +ExecStartPost=/usr/bin/sleep 10 +ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api +LimitNOFILE=131072 +LimitCORE=infinity +WorkingDirectory={{ openshift.common.data_dir }} +SyslogIdentifier=atomic-openshift-master-api +Restart=always + +[Install] +WantedBy=multi-user.target +WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 new file mode 120000 index 000000000..8714ebbae --- /dev/null +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 @@ -0,0 +1 @@ +../native-cluster/atomic-openshift-master-controllers.j2
\ No newline at end of file diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 new file mode 100644 index 000000000..6ba7d6e2a --- /dev/null +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -0,0 +1,25 @@ +[Unit] +Description=Atomic OpenShift Master Controllers +Documentation=https://github.com/openshift/origin +After=network.target +After={{ openshift.common.service_type }}-master-api.service +Before={{ openshift.common.service_type }}-node.service +Requires=docker.service +PartOf=docker.service + +[Service] +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers +Environment=GOTRACEBACK=crash +ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }} start master controllers --config=${CONFIG_FILE} $OPTIONS +ExecStartPost=/usr/bin/sleep 10 +ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers +LimitNOFILE=131072 +LimitCORE=infinity +WorkingDirectory={{ openshift.common.data_dir }} +SyslogIdentifier={{ openshift.common.service_type }}-master-controllers +Restart=on-failure + +[Install] +WantedBy=multi-user.target +WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/docker/master.docker.service.j2 b/roles/openshift_master/templates/docker/master.docker.service.j2 new file mode 100644 index 000000000..23781a313 --- /dev/null +++ b/roles/openshift_master/templates/docker/master.docker.service.j2 @@ -0,0 +1,16 @@ +[Unit] +After=docker.service +Before={{ openshift.common.service_type }}-node.service +Requires=docker.service +PartOf=docker.service + +[Service] +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master +ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }} start master --config=${CONFIG_FILE} $OPTIONS +ExecStartPost=/usr/bin/sleep 10 +ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 2b399e77d..1eeab46fe 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -5,9 +5,24 @@ apiLevels: - v1 apiVersion: v1 assetConfig: - logoutURL: "" + logoutURL: "{{ openshift.master.logout_url | default('') }}" masterPublicURL: {{ openshift.master.public_api_url }} publicURL: {{ openshift.master.public_console_url }}/ +{% if 'logging_public_url' in openshift.master %} + loggingPublicURL: {{ openshift.master.logging_public_url }} +{% endif %} +{% if 'metrics_public_url' in 
openshift.master %} + metricsPublicURL: {{ openshift.master.metrics_public_url }} +{% endif %} +{% if 'extension_scripts' in openshift.master %} + extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }} +{% endif %} +{% if 'extension_stylesheets' in openshift.master %} + extensionStylesheets: {{ openshift.master.extension_stylesheets | to_padded_yaml(1, 2) }} +{% endif %} +{% if 'extensions' in openshift.master %} + extensions: {{ openshift.master.extensions | to_padded_yaml(1, 2) }} +{% endif %} servingInfo: bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }} bindNetwork: tcp4 @@ -81,11 +96,11 @@ kubernetesMasterConfig: - v1beta3 - v1 {% endif %} - apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }} - controllerArguments: {{ controller_args if controller_args is defined else 'null' }} + apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }} + controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }} masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }} masterIP: {{ openshift.common.ip }} - podEvictionTimeout: "" + podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }} proxyClientInfo: certFile: master.proxy-client.crt keyFile: master.proxy-client.key @@ -108,6 +123,10 @@ networkConfig: # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet serviceNetworkCIDR: {{ openshift.master.portal_net }} oauthConfig: +{% if 'oauth_template' in openshift.master %} + templates: + login: {{ openshift.master.oauth_template }} +{% endif %} assetPublicURL: {{ openshift.master.public_console_url }}/ grantConfig: method: {{ openshift.master.oauth_grant_method }} @@ -121,7 +140,9 @@ oauthConfig: sessionConfig: sessionMaxAgeSeconds: {{ openshift.master.session_max_seconds }} sessionName: {{ openshift.master.session_name }} +{% if openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined %} sessionSecretsFile: {{ openshift.master.session_secrets_file }} +{% endif %} tokenConfig: accessTokenMaxAgeSeconds: {{ openshift.master.access_token_max_seconds }} authorizeTokenMaxAgeSeconds: {{ openshift.master.auth_token_max_seconds }} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 new file mode 100644 index 000000000..48bfa5f04 --- /dev/null +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ -0,0 +1,9 @@ +OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }} +CONFIG_FILE={{ openshift_master_config_file }} + +# Proxy configuration +# Origin uses standard HTTP_PROXY environment variables. 
Be sure to set +# NO_PROXY for your master +#NO_PROXY=master.example.com +#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT +#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 index ba19fb348..ba19fb348 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 index 205934248..cdc56eece 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 @@ -1,5 +1,5 @@ -OPTIONS= -CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml +OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }} +CONFIG_FILE={{ openshift_master_config_file }} # Proxy configuration # Origin uses standard HTTP_PROXY environment variables. Be sure to set diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index 8952c86ef..e6e97b24f 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -7,7 +7,11 @@ Before={{ openshift.common.service_type }}-node.service Requires=network.target [Service] +{% if openshift.common.version_greater_than_3_1_1_or_1_1_1 | bool %} Type=notify +{% else %} +Type=simple +{% endif %} EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers Environment=GOTRACEBACK=crash ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS diff --git a/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 index d12d9db90..3d4b573a9 100644 --- a/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 +++ b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 @@ -1,7 +1,7 @@ apiVersion: v1 kind: SessionSecrets secrets: -{% for secret in openshift_master_session_auth_secrets %} -- authentication: "{{ openshift_master_session_auth_secrets[loop.index0] }}" - encryption: "{{ openshift_master_session_encryption_secrets[loop.index0] }}" +{% for secret in openshift.master.session_auth_secrets %} +- authentication: "{{ openshift.master.session_auth_secrets[loop.index0] }}" + encryption: "{{ openshift.master.session_encryption_secrets[loop.index0] }}" {% endfor %} diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml index 534465451..fe88c3c16 100644 --- a/roles/openshift_master/vars/main.yml +++ b/roles/openshift_master/vars/main.yml @@ -1,11 +1,16 @@ --- openshift_master_config_dir: "{{ openshift.common.config_base }}/master" openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml" +openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig" +loopback_context_string: "current-context: {{ 
openshift.master.loopback_context_name }}" openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json" openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml" openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json" openshift_version: "{{ openshift_pkg_version | default('') }}" +ha_svc_template_path: "{{ 'docker-cluster' if openshift.common.is_containerized | bool else 'native-cluster' }}" +ha_svc_svc_dir: "{{ '/etc/systemd/system' if openshift.common.is_containerized | bool else '/usr/lib/systemd/system' }}" + openshift_master_valid_grant_methods: - auto - prompt diff --git a/roles/openshift_master_ca/meta/main.yml b/roles/openshift_master_ca/meta/main.yml index 0c8881521..b5dd466c9 100644 --- a/roles/openshift_master_ca/meta/main.yml +++ b/roles/openshift_master_ca/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info: - system dependencies: - { role: openshift_repos } +- { role: openshift_cli } diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index caac13be3..6d9be81c0 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -1,22 +1,23 @@ --- - name: Install the base package for admin tooling - yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present - when: ansible_pkg_mgr == "yum" - register: install_result - -- name: Install the base package for admin tooling - dnf: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version }} state=present" + when: not openshift.common.is_containerized | bool register: install_result - name: Reload generated facts openshift_facts: + when: install_result | changed - name: Create openshift_master_config_dir if it doesn't exist file: path: "{{ openshift_master_config_dir }}" state: directory +- name: Pull master docker image + command: > + docker pull {{ openshift.common.cli_image }} + when: openshift.common.is_containerized | bool + - name: Create the master certificates if they do not already exist command: > {{ openshift.common.admin_binary }} create-master-certs diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 13e5d7a4b..36d953111 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -16,6 +16,8 @@ - admin.kubeconfig - master.kubelet-client.crt - master.kubelet-client.key + - master.server.crt + - master.server.key - openshift-master.crt - openshift-master.key - openshift-master.kubeconfig diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml index 7ab9afb51..1b94598dd 100644 --- a/roles/openshift_master_cluster/tasks/configure.yml +++ b/roles/openshift_master_cluster/tasks/configure.yml @@ -34,11 +34,10 @@ - name: Disable stonith command: pcs property set stonith-enabled=false -# TODO: handle case where api port is not 8443 - name: Wait for the clustered master service to be available wait_for: host: "{{ openshift_master_cluster_vip }}" - port: 8443 + port: "{{ openshift.master.api_port }}" state: started timeout: 180 delay: 90 diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml index 6303a6e46..40705d357 100644 --- 
a/roles/openshift_master_cluster/tasks/main.yml +++ b/roles/openshift_master_cluster/tasks/main.yml @@ -1,4 +1,8 @@ --- +- fail: + msg: "Pacemaker cluster configuration is not possible on containerized or atomic hosts for now" + when: openshift.common.is_containerized | bool + - name: Test if cluster is already configured command: pcs status register: pcs_status diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 447ca85f3..c288f4d05 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -1,7 +1,7 @@ --- - name: restart node service: name={{ openshift.common.service_type }}-node state=restarted - when: not node_service_status_changed | default(false) + when: not (node_service_status_changed | default(false) | bool) - name: restart docker service: name=docker state=restarted diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 9d40ae3b3..c92008a77 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -13,4 +13,3 @@ galaxy_info: - cloud dependencies: - { role: openshift_common } -- { role: docker } diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 110556b4a..9035248f9 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -22,8 +22,6 @@ local_facts: annotations: "{{ openshift_node_annotations | default(none) }}" debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" - docker_log_driver: "{{ lookup( 'oo_option' , 'docker_log_driver' ) | default('',True) }}" - docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' ) | default('',True) }}" iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" @@ -33,28 +31,51 @@ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" set_node_ip: "{{ openshift_set_node_ip | default(None) }}" + node_image: "{{ osn_image | default(None) }}" + ovs_image: "{{ osn_ovs_image | default(None) }}" # We have to add tuned-profiles in the same transaction otherwise we run into depsolving -# problems because the rpms don't pin the version properly. +# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
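+# With openshift.common.service_type set to "atomic-openshift", for example,
+# the task below resolves to a single package transaction that installs
+# atomic-openshift-node and tuned-profiles-atomic-openshift-node together.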
- name: Install Node package - yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present - when: ansible_pkg_mgr == "yum" - register: node_install_result - -- name: Install Node package - dnf: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present - when: ansible_pkg_mgr == "dnf" - register: node_install_result + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present" + when: not openshift.common.is_containerized | bool - name: Install sdn-ovs package - yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present - register: sdn_install_result - when: ansible_pkg_mgr == "yum" and openshift.common.use_openshift_sdn + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present" + when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool -- name: Install sdn-ovs package - dnf: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present - register: sdn_install_result - when: ansible_pkg_mgr == "dnf" and openshift.common.use_openshift_sdn +- name: Pull node image + command: > + docker pull {{ openshift.node.node_image }} + when: openshift.common.is_containerized | bool + +- name: Pull OpenVSwitch image + command: > + docker pull {{ openshift.node.ovs_image }} + when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool + +- name: Install Node docker service file + template: + dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" + src: openshift.docker.node.service + register: install_node_result + when: openshift.common.is_containerized | bool + +- name: Install OpenvSwitch docker service file + template: + dest: "/etc/systemd/system/openvswitch.service" + src: openvswitch.docker.service + register: install_ovs_result + when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool + +- name: Reload systemd units + command: systemctl daemon-reload + when: openshift.common.is_containerized and ( ( install_node_result | changed ) + or ( install_ovs_result | changed ) ) + +- name: Start and enable openvswitch docker service + service: name=openvswitch.service enabled=yes state=started + when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool # TODO: add the validate parameter when there is a validation command to run - name: Create the Node config @@ -70,6 +91,7 @@ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node regexp: "{{ item.regex }}" line: "{{ item.line }}" + create: true with_items: - regex: '^OPTIONS=' line: "OPTIONS=--loglevel={{ openshift.node.debug_level }}" @@ -78,70 +100,27 @@ notify: - restart node -- stat: path=/etc/sysconfig/docker - register: docker_check - - # TODO: Enable secure registry when code available in origin -- name: Secure Registry and Logs Options - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^OPTIONS=.*$' - line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \ -{% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %} \ -{% if openshift.node.docker_log_driver is defined %} 
--log-driver {{ openshift.node.docker_log_driver }} {% endif %} \ -{% if openshift.node.docker_log_options is defined %} {{ openshift.node.docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}} {% endif %} '" - when: docker_check.stat.isreg - notify: - - restart docker - -- set_fact: - docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') - | oo_split() | union(['registry.access.redhat.com']) - | difference(['']) }}" - when: openshift.common.deployment_type == 'enterprise' -- set_fact: - docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') - | oo_split() | difference(['']) }}" - when: openshift.common.deployment_type != 'enterprise' - -- name: Add personal registries - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^ADD_REGISTRY=.*$' - line: "ADD_REGISTRY='{{ docker_additional_registries - | oo_prepend_strings_in_list('--add-registry ') | join(' ') }}'" - when: docker_check.stat.isreg and docker_additional_registries - notify: - - restart docker - -- name: Block registries - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^BLOCK_REGISTRY=.*$' - line: "BLOCK_REGISTRY='{{ lookup('oo_option', 'docker_blocked_registries') | oo_split() - | oo_prepend_strings_in_list('--block-registry ') | join(' ') }}'" - when: docker_check.stat.isreg and - lookup('oo_option', 'docker_blocked_registries') != '' - notify: - - restart docker - -- name: Grant access to additional insecure registries - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^INSECURE_REGISTRY=.*' - line: "INSECURE_REGISTRY='{{ lookup('oo_option', 'docker_insecure_registries') | oo_split() - | oo_prepend_strings_in_list('--insecure-registry ') | join(' ') }}'" - when: docker_check.stat.isreg and - lookup('oo_option', 'docker_insecure_registries') != '' - notify: - - restart docker - - name: Additional storage plugin configuration include: storage_plugins/main.yml +# Necessary because when you're on a node that's also a master the master will be +# restarted after the node restarts docker and it will take up to 60 seconds for +# systemd to start the master again +- name: Wait for master API to become available before proceeding + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. + command: > + curl -k --head --silent {{ openshift_node_master_api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false + when: openshift.common.is_containerized | bool + - name: Start and enable node service: name={{ openshift.common.service_type }}-node enabled=yes state=started register: start_result - set_fact: - node_service_status_changed = start_result | changed + node_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml index b5146dcac..eed3c99a3 100644 --- a/roles/openshift_node/tasks/storage_plugins/ceph.yml +++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml @@ -1,12 +1,4 @@ --- - name: Install Ceph storage plugin dependencies - yum: - pkg: ceph-common - state: installed - when: ansible_pkg_mgr == "yum" - -- name: Install Ceph storage plugin dependencies - dnf: - pkg: ceph-common - state: installed - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=ceph-common state=present" + when: not openshift.common.is_atomic | bool
\ No newline at end of file diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml index a357023e1..8fc8497fa 100644 --- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml @@ -1,15 +1,7 @@ --- - name: Install GlusterFS storage plugin dependencies - yum: - pkg: glusterfs-fuse - state: installed - when: ansible_pkg_mgr == "yum" - -- name: Install GlusterFS storage plugin dependencies - dnf: - pkg: glusterfs-fuse - state: installed - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present" + when: not openshift.common.is_atomic | bool - name: Set sebooleans to allow gluster storage plugin access from containers seboolean: diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml index 1edf21d9b..14a613786 100644 --- a/roles/openshift_node/tasks/storage_plugins/nfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml @@ -1,4 +1,8 @@ --- +- name: Install NFS storage plugin dependencies + action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present" + when: not openshift.common.is_atomic | bool + - name: Set seboolean to allow nfs storage plugin access from containers seboolean: name: virt_use_nfs diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 513339bb7..44065f4bd 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -11,9 +11,7 @@ imageConfig: format: {{ openshift.node.registry_url }} latest: false kind: NodeConfig -{% if openshift.node.kubelet_args is defined and openshift.node.kubelet_args %} -kubeletArguments: {{ openshift.node.kubelet_args | to_json }} -{% endif %} +kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }} masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig {% if openshift.common.use_openshift_sdn %} networkPluginName: {{ openshift.common.sdn_network_plugin_name }} diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service new file mode 100644 index 000000000..7a11a10fa --- /dev/null +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -0,0 +1,20 @@ +[Unit] +After=docker.service +After={{ openshift.common.service_type }}-master.service +After=openvswitch.service +{% if openshift.common.use_openshift_sdn %} +Requires=openvswitch.service +{% endif %} +Requires=docker.service +PartOf=docker.service + +[Service] +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node +ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v 
/etc/systemd/system:/host-etc/systemd/system {{ openshift.node.node_image }} +ExecStartPost=/usr/bin/sleep 10 +ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service new file mode 100644 index 000000000..0b42ca6d5 --- /dev/null +++ b/roles/openshift_node/templates/openvswitch.docker.service @@ -0,0 +1,13 @@ +[Unit] +After=docker.service +Requires=docker.service +PartOf=docker.service + +[Service] +ExecStartPre=-/usr/bin/docker rm -f openvswitch +ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }} +ExecStop=/usr/bin/docker stop openvswitch +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 57f71887b..b54811414 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -17,19 +17,19 @@ --signer-serial={{ openshift_master_ca_serial }} --user=system:node:{{ item.openshift.common.hostname }} args: - chdir: "{{ openshift_generated_configs_dir }}" creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}" with_items: nodes_needing_certs - name: Generate the node server certificate command: > - {{ openshift.common.admin_binary }} create-server-cert - --cert=server.crt --key=server.key --overwrite=true + {{ openshift.common.admin_binary }} ca create-server-cert + --cert={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt + --key={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.key + --overwrite=true --hostnames={{ item.openshift.common.all_hostnames |join(",") }} --signer-cert={{ openshift_master_ca_cert }} --signer-key={{ openshift_master_ca_key }} --signer-serial={{ openshift_master_ca_serial }} args: - chdir: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}" creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt" with_items: nodes_needing_certs diff --git a/roles/openshift_registry/tasks/main.yml b/roles/openshift_registry/tasks/main.yml index 749eea5c0..2804e8f2e 100644 --- a/roles/openshift_registry/tasks/main.yml +++ b/roles/openshift_registry/tasks/main.yml @@ -1,6 +1,4 @@ --- -# This role is unused until we add options for configuring the backend storage - - set_fact: _oreg_images="--images='{{ openshift.master.registry_url }}'" - set_fact: _oreg_selector="--selector='{{ openshift.master.registry_selector }}'" @@ -12,3 +10,19 @@ --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images }} register: _oreg_results changed_when: "'service exists' not in _oreg_results.stdout" + +- name: Determine if nfs volume is already attached + command: "{{ openshift.common.client_binary }} get -o template dc/docker-registry --template=\\{\\{.spec.template.spec.volumes\\}\\}" + register: registry_volumes_output + when: attach_registry_volume | bool + +- set_fact: + volume_already_attached: "{{ 'server:' + nfs_host in registry_volumes_output.stdout and 'path:' + registry_volume_path in registry_volumes_output.stdout }}" + when: 
attach_registry_volume | bool + +- name: Add nfs volume to dc/docker-registry + command: > + {{ openshift.common.client_binary }} volume dc/docker-registry + --add --overwrite --name=registry-storage --mount-path=/registry + --source='{"nfs": {"server": "{{ nfs_host }}", "path": "{{ registry_volume_path }}"}}' + when: attach_registry_volume | bool and not volume_already_attached | bool diff --git a/roles/openshift_repos/handlers/main.yml b/roles/openshift_repos/handlers/main.yml index fed4ab2f0..198fc7d6e 100644 --- a/roles/openshift_repos/handlers/main.yml +++ b/roles/openshift_repos/handlers/main.yml @@ -1,6 +1,3 @@ --- -- name: refresh yum cache - command: yum clean all - -- name: refresh dnf cache - command: dnf clean all +- name: refresh cache + command: "{{ ansible_pkg_mgr }} clean all" diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index c55b5df89..8a75639c2 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -8,33 +8,25 @@ # proper repos correctly. - assert: - that: openshift.common.deployment_type in known_openshift_deployment_types + that: openshift_deployment_type in known_openshift_deployment_types - name: Ensure libselinux-python is installed - yum: - pkg: libselinux-python - state: present - when: ansible_pkg_mgr == "yum" - -- name: Ensure libselinux-python is installed - dnf: - pkg: libselinux-python - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=libselinux-python state=present" + when: not openshift.common.is_containerized | bool - name: Create any additional repos that are defined template: src: yum_repo.j2 dest: /etc/yum.repos.d/openshift_additional.repo - when: openshift_additional_repos | length > 0 - notify: refresh yum cache + when: openshift_additional_repos | length > 0 and not openshift.common.is_containerized | bool + notify: refresh cache - name: Remove the additional repos if no longer defined file: dest: /etc/yum.repos.d/openshift_additional.repo state: absent - when: openshift_additional_repos | length == 0 - notify: refresh yum cache + when: openshift_additional_repos | length == 0 and not openshift.common.is_containerized | bool + notify: refresh cache - name: Remove any yum repo files for other deployment types RHEL/CentOS file: @@ -44,7 +36,8 @@ - '*/repos/*' when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and (ansible_os_family == "RedHat" and ansible_distribution != "Fedora") - notify: refresh yum cache + and not openshift.common.is_containerized | bool + notify: refresh cache - name: Remove any yum repo files for other deployment types Fedora file: @@ -53,25 +46,28 @@ with_fileglob: - '*/repos/*' when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and - (ansible_distribution == "Fedora") - notify: refresh dnf cache + (ansible_distribution == "Fedora") + and not openshift.common.is_containerized | bool + notify: refresh cache - name: Configure gpg keys if needed copy: src={{ item }} dest=/etc/pki/rpm-gpg/ with_fileglob: - "{{ openshift_deployment_type }}/gpg_keys/*" - notify: refresh yum cache + notify: refresh cache + when: not openshift.common.is_containerized | bool - name: Configure yum repositories RHEL/CentOS copy: src={{ item }} dest=/etc/yum.repos.d/ with_fileglob: - "{{ openshift_deployment_type }}/repos/*" - notify: refresh yum cache + notify: refresh cache when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora") + and not 
openshift.common.is_containerized | bool - name: Configure yum repositories Fedora copy: src={{ item }} dest=/etc/yum.repos.d/ with_fileglob: - "fedora-{{ openshift_deployment_type }}/repos/*" - notify: refresh dnf cache - when: (ansible_distribution == "Fedora") + notify: refresh cache + when: (ansible_distribution == "Fedora") and not openshift.common.is_containerized | bool diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml index e558a83a2..4c7faa6fe 100644 --- a/roles/openshift_serviceaccounts/tasks/main.yml +++ b/roles/openshift_serviceaccounts/tasks/main.yml @@ -1,12 +1,19 @@ +- name: tmp dir for openshift + file: + path: /tmp/openshift + state: directory + owner: root + mode: 700 + - name: Create service account configs template: src: serviceaccount.j2 - dest: "/tmp/{{ item }}-serviceaccount.yaml" + dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml" with_items: accounts - name: Create {{ item }} service account command: > - {{ openshift.common.client_binary }} create -f "/tmp/{{ item }}-serviceaccount.yaml" + {{ openshift.common.client_binary }} create -f "/tmp/openshift/{{ item }}-serviceaccount.yaml" with_items: accounts register: _sa_result failed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc != 0" @@ -15,14 +22,15 @@ - name: Get current security context constraints shell: > {{ openshift.common.client_binary }} get scc privileged -o yaml - --output-version=v1 > /tmp/scc.yaml + --output-version=v1 > /tmp/openshift/scc.yaml + changed_when: false - name: Add security context constraint for {{ item }} lineinfile: - dest: /tmp/scc.yaml + dest: /tmp/openshift/scc.yaml line: "- system:serviceaccount:default:{{ item }}" insertafter: "^users:$" with_items: accounts - name: Apply new scc rules for service accounts - command: "{{ openshift.common.client_binary }} update -f /tmp/scc.yaml --api-version=v1" + command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1" diff --git a/roles/openshift_storage_nfs/README.md b/roles/openshift_storage_nfs/README.md new file mode 100644 index 000000000..548e146cb --- /dev/null +++ b/roles/openshift_storage_nfs/README.md @@ -0,0 +1,52 @@ +OpenShift NFS Server +==================== + +OpenShift NFS Server Installation + +Requirements +------------ + +This role is intended to be applied to the [nfs] host group which is +separate from OpenShift infrastructure components. + +Requires access to the 'nfs-utils' package. + +Role Variables +-------------- + +From this role: +| Name | Default value | | +|-------------------------------|-----------------------|--------------------------------------------------| +| openshift_nfs_exports_dir | /var/export | Root export directory. | +| openshift_nfs_registry_volume | regvol | Registry volume within openshift_nfs_exports_dir | +| openshift_nfs_export_options | *(rw,sync,all_squash) | NFS options for configured exports. 
| + + +From openshift_common: +| Name | Default Value | | +|-------------------------------|----------------|----------------------------------------| +| openshift_debug_level | 2 | Global openshift debug log verbosity | + + +Dependencies +------------ + + + +Example Playbook +---------------- + +- name: Configure nfs hosts + hosts: oo_nfs_to_config + roles: + - role: openshift_storage_nfs + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Andrew Butcher (abutcher@redhat.com) diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml new file mode 100644 index 000000000..e25062c00 --- /dev/null +++ b/roles/openshift_storage_nfs/defaults/main.yml @@ -0,0 +1,8 @@ +--- +exports_dir: /var/export +registry_volume: regvol +export_options: '*(rw,sync,all_squash)' +os_firewall_use_firewalld: False +os_firewall_allow: +- service: nfs + port: "2049/tcp" diff --git a/roles/openshift_storage_nfs/handlers/main.yml b/roles/openshift_storage_nfs/handlers/main.yml new file mode 100644 index 000000000..a1377a203 --- /dev/null +++ b/roles/openshift_storage_nfs/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart nfs-server + service: + name: nfs-server + state: restarted + when: not (nfs_service_status_changed | default(false)) diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml new file mode 100644 index 000000000..2975daf52 --- /dev/null +++ b/roles/openshift_storage_nfs/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Andrew Butcher + description: OpenShift NFS Server + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 +dependencies: +- { role: os_firewall } +- { role: openshift_common } +- { role: openshift_repos } diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml new file mode 100644 index 000000000..64b121ade --- /dev/null +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -0,0 +1,49 @@ +--- +- name: Set nfs facts + openshift_facts: + role: nfs + local_facts: + exports_dir: "{{ openshift_nfs_exports_dir | default(None) }}" + export_options: "{{ openshift_nfs_export_options | default(None) }}" + registry_volume: "{{ openshift_nfs_registry_volume | default(None) }}" + +- name: Install nfs-utils + yum: + pkg: nfs-utils + state: present + +- name: Ensure exports directory exists + file: + path: "{{ openshift.nfs.exports_dir }}" + state: directory + +- name: Ensure export directories exist + file: + path: "{{ openshift.nfs.exports_dir }}/{{ item }}" + state: directory + mode: 0777 + owner: nfsnobody + group: nfsnobody + with_items: + - "{{ openshift.nfs.registry_volume }}" + +- name: Configure exports + template: + dest: /etc/exports + src: exports.j2 + notify: + - restart nfs-server + +- name: Enable and start services + service: + name: "{{ item }}" + state: started + enabled: yes + register: start_result + with_items: + - nfs-server + +- set_fact: + nfs_service_status_changed: "{{ True in (start_result.results + | map(attribute='changed') + | list) }}" diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 new file mode 100644 index 000000000..702473040 --- /dev/null +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -0,0 +1 @@ +{{ openshift.nfs.exports_dir }}/{{ openshift.nfs.registry_volume }} {{ openshift.nfs.export_options }} diff --git 
a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml index ead81b876..ea0cc2a94 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/main.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml @@ -1,4 +1,9 @@ --- +# TODO -- this may actually work on atomic hosts +- fail: + msg: "openshift_storage_nfs_lvm is not compatible with atomic host" + when: openshift.common.is_atomic | bool + - name: Create lvm volumes lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml index bf23dfe98..fc8de1cb5 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml @@ -1,12 +1,8 @@ --- - name: Install NFS server - yum: name=nfs-utils state=present - when: ansible_pkg_mgr == "yum" - -- name: Install NFS server - dnf: name=nfs-utils state=present - when: ansible_pkg_mgr == "dnf" - + action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present" + when: not openshift.common.is_containerized | bool + - name: Start rpcbind service: name=rpcbind state=started enabled=yes diff --git a/roles/os_env_extras/tasks/main.yaml b/roles/os_env_extras/tasks/main.yaml index 29599559c..628df713a 100644 --- a/roles/os_env_extras/tasks/main.yaml +++ b/roles/os_env_extras/tasks/main.yaml @@ -12,13 +12,5 @@ dest: /root/.vimrc - name: Bash Completion - yum: - pkg: bash-completion - state: installed - when: ansible_pkg_mgr == "yum" - -- name: Bash Completion - dnf: - pkg: bash-completion - state: installed - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=bash-completion state=present" + when: not openshift.common.is_containerized | bool
\ No newline at end of file diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml index bcf1d9a34..e3176e611 100644 --- a/roles/os_firewall/defaults/main.yml +++ b/roles/os_firewall/defaults/main.yml @@ -1,2 +1,3 @@ --- +os_firewall_enabled: True os_firewall_use_firewalld: True diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index cf2a2c733..ac4600f83 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -1,16 +1,7 @@ --- - name: Install firewalld packages - yum: - name: firewalld - state: present - when: ansible_pkg_mgr == "yum" - register: install_result - -- name: Install firewalld packages - dnf: - name: firewalld - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=firewalld state=present" + when: not openshift.common.is_containerized | bool register: install_result - name: Check if iptables-services is installed diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 36d51504c..5cf4bf7af 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -1,23 +1,11 @@ --- - name: Install iptables packages - yum: - name: "{{ item }}" - state: present + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" with_items: - iptables - iptables-services - when: ansible_pkg_mgr == "yum" - register: install_result - -- name: Install iptables packages - dnf: - name: "{{ item }}" - state: present - with_items: - - iptables - - iptables-services - when: ansible_pkg_mgr == "dnf" register: install_result + when: not openshift.common.is_atomic | bool - name: Check if firewalld is installed command: rpm -q firewalld diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml index ad89ef97c..076e5e311 100644 --- a/roles/os_firewall/tasks/main.yml +++ b/roles/os_firewall/tasks/main.yml @@ -1,6 +1,6 @@ --- - include: firewall/firewalld.yml - when: os_firewall_use_firewalld + when: os_firewall_enabled | bool and os_firewall_use_firewalld | bool - include: firewall/iptables.yml - when: not os_firewall_use_firewalld + when: os_firewall_enabled | bool and not os_firewall_use_firewalld | bool diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml index 40eec8d35..2400164fa 100644 --- a/roles/os_update_latest/tasks/main.yml +++ b/roles/os_update_latest/tasks/main.yml @@ -1,8 +1,8 @@ --- -- name: Update all packages - yum: name=* state=latest - when: ansible_pkg_mgr == "yum" +- fail: + msg: "Update is not yet supported by this playbook on atomic hosts" + when: openshift.common.is_containerized | bool - name: Update all packages - dnf: name=* state=latest - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=* state=latest" + when: not openshift.common.is_containerized | bool
\ No newline at end of file diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml index d0b307a3d..1c8d88854 100644 --- a/roles/os_zabbix/tasks/main.yml +++ b/roles/os_zabbix/tasks/main.yml @@ -37,6 +37,13 @@ - include_vars: template_aws.yml tags: - aws +- include_vars: template_zagg_server.yml + tags: + - zagg_server + +- include_vars: template_config_loop.yml + tags: + - config_loop - name: Include Template Heartbeat include: ../../lib_zabbix/tasks/create_template.yml @@ -137,3 +144,23 @@ password: "{{ ozb_password }}" tags: - aws + +- name: Include Template Zagg Server + include: ../../lib_zabbix/tasks/create_template.yml + vars: + template: "{{ g_template_zagg_server }}" + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + tags: + - zagg_server + +- name: Include Template Config Loop + include: ../../lib_zabbix/tasks/create_template.yml + vars: + template: "{{ g_template_config_loop }}" + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + tags: + - config_loop diff --git a/roles/os_zabbix/vars/template_config_loop.yml b/roles/os_zabbix/vars/template_config_loop.yml new file mode 100644 index 000000000..823da1868 --- /dev/null +++ b/roles/os_zabbix/vars/template_config_loop.yml @@ -0,0 +1,14 @@ +--- +g_template_config_loop: + name: Template Config Loop + zitems: + - key: config_loop.run.exit_code + applications: + - Config Loop + value_type: int + + ztriggers: + - name: 'config_loop.run.exit_code not zero on {HOST.NAME}' + expression: '{Template Config Loop:config_loop.run.exit_code.min(#2)}>0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_config_loop.asciidoc' + priority: average diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml index bfabf50c5..dd13e76f7 100644 --- a/roles/os_zabbix/vars/template_docker.yml +++ b/roles/os_zabbix/vars/template_docker.yml @@ -12,6 +12,16 @@ g_template_docker: - Docker Daemon value_type: int + - key: docker.container.dns.resolution + applications: + - Docker Daemon + value_type: int + + - key: docker.container.existing.dns.resolution.failed + applications: + - Docker Daemon + value_type: int + - key: docker.storage.is_loopback applications: - Docker Storage @@ -62,6 +72,18 @@ g_template_docker: url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_ping.asciidoc' priority: high + # Re-enable for OpenShift 3.1.1 (https://bugzilla.redhat.com/show_bug.cgi?id=1292971#c6) + - name: 'docker.container.dns.resolution failed on {HOST.NAME}' + expression: '{Template Docker:docker.container.dns.resolution.min(#3)}>0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_dns.asciidoc' + priority: average + status: disabled + + - name: 'docker.container.existing.dns.resolution.failed on {HOST.NAME}' + expression: '{Template Docker:docker.container.existing.dns.resolution.failed.min(#3)}>0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_dns.asciidoc' + priority: average + - name: 'Docker storage is using LOOPBACK on {HOST.NAME}' expression: '{Template Docker:docker.storage.is_loopback.last()}<>0' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_loopback.asciidoc' diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml index 514d6fd24..a0ba8d104 100644 --- a/roles/os_zabbix/vars/template_openshift_master.yml +++ 
b/roles/os_zabbix/vars/template_openshift_master.yml @@ -269,6 +269,14 @@ g_template_openshift_master: - 'Openshift Master process not running on {HOST.NAME}' priority: avg + - name: 'Application creation has failed multiple times in the last hour on {HOST.NAME}' + expression: '{Template Openshift Master:create_app.sum(1h)}>3' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc' + dependencies: + - 'Openshift Master process not running on {HOST.NAME}' + description: The application create loop has failed 4 or more times in the last hour + priority: avg + - name: 'Openshift Master API health check is failing on {HOST.NAME}' expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' diff --git a/roles/os_zabbix/vars/template_zagg_server.yml b/roles/os_zabbix/vars/template_zagg_server.yml new file mode 100644 index 000000000..db5665993 --- /dev/null +++ b/roles/os_zabbix/vars/template_zagg_server.yml @@ -0,0 +1,46 @@ +--- +g_template_zagg_server: + name: Template Zagg Server + zitems: + - key: zagg.server.metrics.count + applications: + - Zagg Server + value_type: int + + - key: zagg.server.metrics.errors + applications: + - Zagg Server + value_type: int + + - key: zagg.server.heartbeat.errors + applications: + - Zagg Server + value_type: int + + - key: zagg.server.heartbeat.count + applications: + - Zagg Server + value_type: int + + ztriggers: + - name: 'Error processing metrics on {HOST.NAME}' + expression: '{Template Zagg Server:zagg.server.metrics.errors.min(#3)}>0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc' + priority: average + + - name: 'Error processing heartbeats on {HOST.NAME}' + expression: '{Template Zagg Server:zagg.server.heartbeat.errors.min(#3)}>0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc' + priority: average + + - name: 'Critically High number of metrics in Zagg queue {HOST.NAME}' + expression: '{Template Zagg Server:zagg.server.metrics.count.min(#3)}>10000' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc' + priority: high + + - name: 'High number of metrics in Zagg queue {HOST.NAME}' + expression: '{Template Zagg Server:zagg.server.metrics.count.min(#3)}>5000' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc' + dependencies: + - 'Critically High number of metrics in Zagg queue {HOST.NAME}' + priority: average diff --git a/roles/oso_host_monitoring/handlers/main.yml b/roles/oso_host_monitoring/handlers/main.yml index 7863ad15b..3a5d8024c 100644 --- a/roles/oso_host_monitoring/handlers/main.yml +++ b/roles/oso_host_monitoring/handlers/main.yml @@ -4,9 +4,3 @@ name: "{{ osohm_host_monitoring }}" state: restarted enabled: yes - -- name: "Restart the {{ osohm_zagg_client }} service" - service: - name: "{{ osohm_zagg_client }}" - state: restarted - enabled: yes diff --git a/roles/oso_host_monitoring/tasks/main.yml b/roles/oso_host_monitoring/tasks/main.yml index 6ddfa3dcb..a0a453416 100644 --- a/roles/oso_host_monitoring/tasks/main.yml +++ b/roles/oso_host_monitoring/tasks/main.yml @@ -5,7 +5,6 @@ with_items: - osohm_zagg_web_url - osohm_host_monitoring - - osohm_zagg_client - osohm_docker_registry_url - osohm_default_zagg_server_user - osohm_default_zagg_server_password @@ -37,29 +36,12 @@ - "Restart the {{ osohm_host_monitoring }} service" register: 
systemd_host_monitoring -- name: "Copy {{ osohm_zagg_client }} systemd file" - template: - src: "{{ osohm_zagg_client }}.service.j2" - dest: "/etc/systemd/system/{{ osohm_zagg_client }}.service" - owner: root - group: root - mode: 0644 - notify: - - "Restart the {{ osohm_zagg_client }} service" - register: zagg_systemd - - name: reload systemd command: /usr/bin/systemctl --system daemon-reload - when: systemd_host_monitoring | changed or zagg_systemd | changed + when: systemd_host_monitoring | changed - name: "Start the {{ osohm_host_monitoring }} service" service: name: "{{ osohm_host_monitoring }}" state: started enabled: yes - -- name: "Start the {{ osohm_zagg_client }} service" - service: - name: "{{ osohm_zagg_client }}" - state: started - enabled: yes diff --git a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 deleted file mode 100644 index d18ad90fe..000000000 --- a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 +++ /dev/null @@ -1,43 +0,0 @@ -# This is a systemd file to run this docker container under systemd. -# To make this work: -# * pull the image (probably from ops docker registry) -# * place this file in /etc/systemd/system without the .systemd extension -# * run the commands: -# systemctl daemon-reload -# systemctl enable pcp-docker -# systemctl start pcp-docker -# -# -[Unit] -Description=PCP Collector Contatainer -Requires=docker.service -After=docker.service - - -[Service] -Type=simple -TimeoutStartSec=5m -Environment=HOME=/etc/docker/ops -#Slice=container-small.slice - -# systemd syntax '=-' ignore errors from return codes. -ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}" -ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}" -ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}" - - -ExecStart=/usr/bin/docker run --rm --name="{{ osohm_host_monitoring }}" \ - --privileged --net=host --pid=host --ipc=host \ - -v /sys:/sys:ro -v /etc/localtime:/etc/localtime:ro \ - -v /var/lib/docker:/var/lib/docker:ro -v /run:/run \ - -v /var/log:/var/log \ - {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }} - -ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}" -ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}" -ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}" -Restart=always -RestartSec=30 - -[Install] -WantedBy=default.target diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 index 978e40b88..31f7d4caa 100644 --- a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2 +++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 @@ -4,12 +4,12 @@ # * place this file in /etc/systemd/system without the .systemd extension # * run the commands: # systemctl daemon-reload -# systemctl enable zagg-client-docker -# systemctl start zagg-client-docker +# systemctl enable oso-rhel7-host-monitoring +# systemctl start oso-rhel7-host-monitoring # # [Unit] -Description=Zagg Client Contatainer +Description=Openshift Host Monitoring Container Requires=docker.service After=docker.service @@ -21,40 +21,53 @@ Environment=HOME=/etc/docker/ops #Slice=container-small.slice # systemd syntax '=-' ignore errors from return codes. 
-ExecStartPre=-/usr/bin/docker kill "{{ osohm_zagg_client }}" -ExecStartPre=-/usr/bin/docker rm "{{ osohm_zagg_client }}" -ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_zagg_client }}" +ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}" +ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}" +ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}" +# mwoodson note 1-7-16: +# pcp recommends mounting /run in their Dockerfile. +# /run conflicts with cron, which also runs in this container. +# I am leaving /run out for now. The guys in #pcp said that they mounted /run +# to share the pcp socket that is created in /run. We are not using this, +# as far as I know. +# This problem goes away with systemd being run in the containers and not using +# cron but using systemd timers. +# -v /run:/run \ -ExecStart=/usr/bin/docker run --name {{ osohm_zagg_client }} \ +ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }} \ --privileged \ --pid=host \ --net=host \ - -e ZAGG_URL={{ osohm_zagg_web_url }} \ - -e ZAGG_USER={{ osohm_default_zagg_server_user }} \ - -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \ + --ipc=host \ + -e ZAGG_URL={{ osohm_zagg_web_url }} \ + -e ZAGG_USER={{ osohm_default_zagg_server_user }} \ + -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \ -e ZAGG_CLIENT_HOSTNAME={{ ec2_tag_Name }} \ - -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \ + -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \ -e OSO_CLUSTER_GROUP={{ cluster_group }} \ - -e OSO_CLUSTER_ID={{ oo_clusterid }} \ + -e OSO_CLUSTER_ID={{ oo_clusterid }} \ + -e OSO_ENVIRONMENT={{ oo_environment }} \ -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }} \ -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }} \ -v /etc/localtime:/etc/localtime \ - -v /run/pcp:/run/pcp \ + -v /sys:/sys:ro \ + -v /sys/fs/selinux \ + -v /var/lib/docker:/var/lib/docker:ro \ -v /var/run/docker.sock:/var/run/docker.sock \ - -v /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock \ + -v /var/run/openvswitch:/var/run/openvswitch \ {% if hostvars[inventory_hostname]['ec2_tag_host-type'] == 'master' %} -v /etc/openshift/master/admin.kubeconfig:/etc/openshift/master/admin.kubeconfig \ -v /etc/openshift/master/master.etcd-client.crt:/etc/openshift/master/master.etcd-client.crt \ -v /etc/openshift/master/master.etcd-client.key:/etc/openshift/master/master.etcd-client.key \ -v /etc/openshift/master/master-config.yaml:/etc/openshift/master/master-config.yaml \ {% endif %} - {{ osohm_docker_registry_url }}{{ osohm_zagg_client }} + {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }} -ExecReload=-/usr/bin/docker stop "{{ osohm_zagg_client }}" -ExecReload=-/usr/bin/docker rm "{{ osohm_zagg_client }}" -ExecStop=-/usr/bin/docker stop "{{ osohm_zagg_client }}" +ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}" +ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}" +ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}" Restart=always RestartSec=30 diff --git a/roles/oso_monitoring_tools/README.md b/roles/oso_monitoring_tools/README.md new file mode 100644 index 000000000..4215f9eeb --- /dev/null +++ b/roles/oso_monitoring_tools/README.md @@ -0,0 +1,54 @@ +Role Name +========= + +This role installs the OpenShift Monitoring Utilities. + +Requirements +------------ +
+The openshift-tools monitoring RPMs installed by this role (see the task list below) must be available from a yum repository configured on the target host. + +Role Variables +-------------- + +osomt_zagg_client_config + +from vars/main.yml: + +osomt_zagg_client_config: + host: + name: "{{ osomt_host_name }}" + zagg: + url: "{{ osomt_zagg_url }}" + user: "{{ osomt_zagg_user }}" + pass: "{{ osomt_zagg_password }}" + ssl_verify: "{{ osomt_zagg_ssl_verify }}" + verbose: "{{ osomt_zagg_verbose }}" + debug: "{{ osomt_zagg_debug }}" + +Dependencies +------------ + +None + +Example Playbook +---------------- + +- role: "oso_monitoring_tools" + osomt_host_name: hostname + osomt_zagg_url: http://path.to/zagg_web + osomt_zagg_user: admin + osomt_zagg_password: password + osomt_zagg_ssl_verify: True + osomt_zagg_verbose: False + osomt_zagg_debug: False + +License +------- + +ASL 2.0 + +Author Information +------------------ + +OpenShift Operations diff --git a/roles/oso_monitoring_tools/defaults/main.yml b/roles/oso_monitoring_tools/defaults/main.yml new file mode 100644 index 000000000..a17424f25 --- /dev/null +++ b/roles/oso_monitoring_tools/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for oso_monitoring_tools diff --git a/roles/oso_monitoring_tools/handlers/main.yml b/roles/oso_monitoring_tools/handlers/main.yml new file mode 100644 index 000000000..cefa780ab --- /dev/null +++ b/roles/oso_monitoring_tools/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for oso_monitoring_tools diff --git a/roles/oso_monitoring_tools/meta/main.yml b/roles/oso_monitoring_tools/meta/main.yml new file mode 100644 index 000000000..9c42b68dc --- /dev/null +++ b/roles/oso_monitoring_tools/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: OpenShift Operations + description: Install OpenShift Monitoring tools + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 1.2 +dependencies: [] diff --git a/roles/oso_monitoring_tools/tasks/main.yml b/roles/oso_monitoring_tools/tasks/main.yml new file mode 100644 index 000000000..c90fc56e2 --- /dev/null +++ b/roles/oso_monitoring_tools/tasks/main.yml @@ -0,0 +1,18 @@ +--- +# tasks file for oso_monitoring_tools +- name: Install the OpenShift Tools RPMs + yum: + name: "{{ item }}" + state: latest + with_items: + - openshift-tools-scripts-monitoring-zagg-client + - python-openshift-tools-monitoring-zagg + - python-openshift-tools-monitoring-zabbix + +- debug: var=osomt_zagg_client_config + +- name: Generate the /etc/openshift_tools/zagg_client.yaml config file + copy: + content: "{{ osomt_zagg_client_config | to_nice_yaml }}" + dest: /etc/openshift_tools/zagg_client.yaml + mode: "0644" diff --git a/roles/oso_monitoring_tools/vars/main.yml b/roles/oso_monitoring_tools/vars/main.yml new file mode 100644 index 000000000..3538ba30b --- /dev/null +++ b/roles/oso_monitoring_tools/vars/main.yml @@ -0,0 +1,12 @@ +--- +# vars file for oso_monitoring_tools +osomt_zagg_client_config: + host: + name: "{{ osomt_host_name }}" + zagg: + url: "{{ osomt_zagg_url }}" + user: "{{ osomt_zagg_user }}" + pass: "{{ osomt_zagg_password }}" + ssl_verify: "{{ osomt_zagg_ssl_verify }}" + verbose: "{{ osomt_zagg_verbose }}" + debug: "{{ osomt_zagg_debug }}" diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml index e9e6e4bd4..08540f440 100644 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -2,8 +2,24 @@ - name: Disable all repositories command: subscription-manager repos --disable="*" +- set_fact: +
default_ose_version: '3.0' + when: deployment_type == 'enterprise' + +- set_fact: + default_ose_version: '3.1' + when: deployment_type in ['atomic-enterprise', 'openshift-enterprise'] + +- set_fact: + ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}" + +- fail: + msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type" + when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or + ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1'] ) + - name: Enable RHEL repositories command: subscription-manager repos \ --enable="rhel-7-server-rpms" \ --enable="rhel-7-server-extras-rpms" \ - --enable="rhel-7-server-ose-3.0-rpms" + --enable="rhel-7-server-ose-{{ ose_version }}-rpms" diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index 30c0920a1..eecfd04a0 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -4,6 +4,7 @@ # to make it able to enable repositories - set_fact: + rhel_subscription_pool: "{{ lookup('oo_option', 'rhel_subscription_pool') | default(rhsub_pool, True) | default('OpenShift Enterprise, Premium*', True) }}" rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}" rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}" rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}" @@ -30,7 +31,14 @@ redhat_subscription: username: "{{ rhel_subscription_user }}" password: "{{ rhel_subscription_pass }}" - autosubscribe: yes + +- name: Retrieve the OpenShift Pool ID + command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only + register: openshift_pool_id + changed_when: False + +- name: Attach to OpenShift Pool + command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }} - include: enterprise.yml - when: deployment_type == 'enterprise' + when: deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ] diff --git a/roles/tito/tasks/main.yml b/roles/tito/tasks/main.yml index f7b4ef363..3cf9e2bfd 100644 --- a/roles/tito/tasks/main.yml +++ b/roles/tito/tasks/main.yml @@ -1,4 +1,2 @@ --- -- yum: - name: tito - state: present +- action: "{{ ansible_pkg_mgr }} name=tito state=present" diff --git a/roles/yum_repos/tasks/main.yml b/roles/yum_repos/tasks/main.yml index a9903c6c6..46928a00b 100644 --- a/roles/yum_repos/tasks/main.yml +++ b/roles/yum_repos/tasks/main.yml @@ -45,3 +45,4 @@ src: yumrepo.j2 dest: /etc/yum.repos.d/{{ item.id }}.repo with_items: repo_files + when: not openshift.common.is_containerized | bool diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index dc88cb1ad..1aacf3a4b 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -33,9 +33,7 @@ def is_valid_hostname(hostname): def validate_prompt_hostname(hostname): if '' == hostname or is_valid_hostname(hostname): return hostname - raise click.BadParameter('"{}" appears to be an invalid hostname. ' \ - 'Please double-check this value i' \ - 'and re-enter it.'.format(hostname)) + raise click.BadParameter('Invalid hostname. 
Please double-check this value and re-enter it.') def get_ansible_ssh_user(): click.clear() @@ -72,7 +70,7 @@ def delete_hosts(hosts): click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx)) return hosts, None -def collect_hosts(version=None, masters_set=False, print_summary=True): +def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True): """ Collect host information from user. This will later be filled in using ansible. @@ -125,26 +123,29 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen host_props['master'] = True num_masters += 1 - if version == '3.0': + if oo_cfg.settings['variant_version'] == '3.0': masters_set = True host_props['node'] = True - #TODO: Reenable this option once container installs are out of tech preview - #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?', - # type=click.Choice(['rpm', 'container']), - # default='rpm') - #if rpm_or_container == 'container': - # host_props['containerized'] = True - #else: - # host_props['containerized'] = False host_props['containerized'] = False + if oo_cfg.settings['variant_version'] != '3.0': + rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?', + type=click.Choice(['rpm', 'container']), + default='rpm') + if rpm_or_container == 'container': + host_props['containerized'] = True + + if existing_env: + host_props['new_host'] = True + else: + host_props['new_host'] = False host = Host(**host_props) hosts.append(host) if print_summary: - print_installation_summary(hosts) + print_installation_summary(hosts, oo_cfg.settings['variant_version']) # If we have one master, this is enough for an all-in-one deployment, # thus we can start asking if you wish to proceed. Otherwise we assume @@ -158,7 +159,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen return hosts -def print_installation_summary(hosts): +def print_installation_summary(hosts, version=None): """ Displays a summary of all hosts configured thus far, and what role each will play. 
@@ -179,7 +180,7 @@ def print_installation_summary(hosts): click.echo('Total OpenShift Masters: %s' % len(masters)) click.echo('Total OpenShift Nodes: %s' % len(nodes)) - if len(masters) == 1: + if len(masters) == 1 and version != '3.0': ha_hint_message = """ NOTE: Add a total of 3 or more Masters to perform an HA installation.""" click.echo(ha_hint_message) @@ -494,20 +495,20 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h click.clear() if not oo_cfg.hosts: - oo_cfg.hosts = collect_hosts(version=oo_cfg.settings['variant_version']) + oo_cfg.hosts = collect_hosts(oo_cfg) click.clear() return oo_cfg -def collect_new_nodes(): +def collect_new_nodes(oo_cfg): click.clear() click.echo('*** New Node Configuration ***') message = """ Add new nodes here """ click.echo(message) - return collect_hosts(masters_set=True, print_summary=False) + return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False) def get_installed_hosts(hosts, callback_facts): installed_hosts = [] @@ -577,7 +578,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): sys.exit(1) else: if not force: - new_nodes = collect_new_nodes() + new_nodes = collect_new_nodes(oo_cfg) hosts_to_run_on.extend(new_nodes) oo_cfg.hosts.extend(new_nodes) @@ -752,7 +753,7 @@ def install(ctx, force): check_hosts_config(oo_cfg, ctx.obj['unattended']) - print_installation_summary(oo_cfg.hosts) + print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None)) click.echo('Gathering information from hosts...') callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose) diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index 031b82bc1..33ab27567 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -38,6 +38,7 @@ class Host(object): self.public_hostname = kwargs.get('public_hostname', None) self.connect_to = kwargs.get('connect_to', None) self.preconfigured = kwargs.get('preconfigured', None) + self.new_host = kwargs.get('new_host', None) # Should this host run as an OpenShift master: self.master = kwargs.get('master', False) @@ -68,7 +69,8 @@ class Host(object): """ Used when exporting to yaml. 
""" d = {} for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', - 'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']: + 'master', 'node', 'master_lb', 'containerized', + 'connect_to', 'preconfigured', 'new_host']: # If the property is defined (not None or False), export it: if getattr(self, prop): d[prop] = getattr(self, prop) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index fd2cd7fbd..c0d115fdc 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -19,13 +19,15 @@ def generate_inventory(hosts): global CFG masters = [host for host in hosts if host.master] nodes = [host for host in hosts if host.node] + new_nodes = [host for host in hosts if host.node and host.new_host] proxy = determine_proxy_configuration(hosts) multiple_masters = len(masters) > 1 + scaleup = len(new_nodes) > 0 base_inventory_path = CFG.settings['ansible_inventory_path'] base_inventory = open(base_inventory_path, 'w') - write_inventory_children(base_inventory, multiple_masters, proxy) + write_inventory_children(base_inventory, multiple_masters, proxy, scaleup) write_inventory_vars(base_inventory, multiple_masters, proxy) @@ -71,6 +73,11 @@ def generate_inventory(hosts): base_inventory.write('\n[lb]\n') write_host(proxy, base_inventory) + if scaleup: + base_inventory.write('\n[new_nodes]\n') + for node in new_nodes: + write_host(node, base_inventory) + base_inventory.close() return base_inventory_path @@ -84,12 +91,14 @@ def determine_proxy_configuration(hosts): return None -def write_inventory_children(base_inventory, multiple_masters, proxy): +def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup): global CFG base_inventory.write('\n[OSEv3:children]\n') base_inventory.write('masters\n') base_inventory.write('nodes\n') + if scaleup: + base_inventory.write('new_nodes\n') if multiple_masters: base_inventory.write('etcd\n') if not getattr(proxy, 'preconfigured', True): @@ -119,6 +128,8 @@ def write_host(host, inventory, schedulable=None): facts += ' openshift_hostname={}'.format(host.hostname) if host.public_hostname: facts += ' openshift_public_hostname={}'.format(host.public_hostname) + if host.containerized: + facts += ' containerized={}'.format(host.containerized) # TODO: For not write_host is handles both master and nodes. # Technically only nodes will ever need this. 
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 1da49c807..72e8521d0 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -681,9 +681,9 @@ class AttendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 cli_input = build_input(hosts=[ - ('10.0.0.1', True), - ('10.0.0.2', False), - ('10.0.0.3', False)], + ('10.0.0.1', True, False), + ('10.0.0.2', False, False), + ('10.0.0.3', False, False)], ssh_user='root', variant_num=1, confirm_facts='y') @@ -722,10 +722,10 @@ class AttendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 cli_input = build_input(hosts=[ - ('10.0.0.1', True), - ('10.0.0.2', False), + ('10.0.0.1', True, False), + ('10.0.0.2', False, False), ], - add_nodes=[('10.0.0.3', False)], + add_nodes=[('10.0.0.3', False, False)], ssh_user='root', variant_num=1, confirm_facts='y') @@ -773,9 +773,9 @@ class AttendedCliTests(OOCliFixture): mock_facts['10.0.0.2']['common']['version'] = "3.0.0" cli_input = build_input(hosts=[ - ('10.0.0.1', True), + ('10.0.0.1', True, False), ], - add_nodes=[('10.0.0.2', False)], + add_nodes=[('10.0.0.2', False, False)], ssh_user='root', variant_num=1, schedulable_masters_ok=True, @@ -796,10 +796,10 @@ class AttendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 cli_input = build_input(hosts=[ - ('10.0.0.1', True), - ('10.0.0.2', True), - ('10.0.0.3', True), - ('10.0.0.4', False)], + ('10.0.0.1', True, False), + ('10.0.0.2', True, False), + ('10.0.0.3', True, False), + ('10.0.0.4', False, False)], ssh_user='root', variant_num=1, confirm_facts='y', @@ -837,9 +837,9 @@ class AttendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 cli_input = build_input(hosts=[ - ('10.0.0.1', True), - ('10.0.0.2', True), - ('10.0.0.3', True)], + ('10.0.0.1', True, False), + ('10.0.0.2', True, False), + ('10.0.0.3', True, False)], ssh_user='root', variant_num=1, confirm_facts='y', @@ -872,10 +872,10 @@ class AttendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 cli_input = build_input(hosts=[ - ('10.0.0.1', True), - ('10.0.0.2', True), - ('10.0.0.3', False), - ('10.0.0.4', True)], + ('10.0.0.1', True, False), + ('10.0.0.2', True, False), + ('10.0.0.3', False, False), + ('10.0.0.4', True, False)], ssh_user='root', variant_num=1, confirm_facts='y', @@ -893,7 +893,7 @@ class AttendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 cli_input = build_input(hosts=[ - ('10.0.0.1', True)], + ('10.0.0.1', True, False)], ssh_user='root', variant_num=1, confirm_facts='y') @@ -913,6 +913,25 @@ class AttendedCliTests(OOCliFixture): self.assertEquals('True', inventory.get('nodes', '10.0.0.1 openshift_schedulable')) + #interactive 3.0 install confirm no HA hints + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') + def test_ha_hint(self, load_facts_mock, run_playbook_mock): + load_facts_mock.return_value = (MOCK_FACTS, 0) + run_playbook_mock.return_value = 0 + + cli_input = build_input(hosts=[ + ('10.0.0.1', True, False)], + ssh_user='root', + variant_num=2, + confirm_facts='y') + self.cli_args.append("install") + result = self.runner.invoke(cli.cli, self.cli_args, + input=cli_input) + self.assert_result(result, 0) + self.assertTrue("NOTE: Add a total of 3 or more Masters to perform an HA installation." 
+ not in result.output) + # TODO: test with config file, attended add node # TODO: test with config file, attended new node already in config file # TODO: test with config file, attended new node already in config file, plus manually added nodes diff --git a/utils/test/fixture.py b/utils/test/fixture.py index 90bd9e1ef..be759578a 100644 --- a/utils/test/fixture.py +++ b/utils/test/fixture.py @@ -138,7 +138,7 @@ class OOCliFixture(OOInstallFixture): self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) -#pylint: disable=too-many-arguments,too-many-branches +#pylint: disable=too-many-arguments,too-many-branches,too-many-statements def build_input(ssh_user=None, hosts=None, variant_num=None, add_nodes=None, confirm_facts=None, schedulable_masters_ok=None, master_lb=None): @@ -163,13 +163,19 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, num_masters = 0 if hosts: i = 0 - for (host, is_master) in hosts: + for (host, is_master, is_containerized) in hosts: inputs.append(host) if is_master: inputs.append('y') num_masters += 1 else: inputs.append('n') + + if is_containerized: + inputs.append('container') + else: + inputs.append('rpm') + #inputs.append('rpm') # We should not be prompted to add more hosts if we're currently at # 2 masters, this is an invalid HA configuration, so this question @@ -196,8 +202,12 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, inputs.append('y') inputs.append('1') # Add more nodes i = 0 - for (host, is_master) in add_nodes: + for (host, is_master, is_containerized) in add_nodes: inputs.append(host) + if is_containerized: + inputs.append('container') + else: + inputs.append('rpm') #inputs.append('rpm') if i < len(add_nodes) - 1: inputs.append('y') # Add more hosts
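To round out the fixture change above, here is a hypothetical call showing the new three-element host tuples; this is an illustration of the updated interface, not an additional test from the patch.

```python
# Hypothetical usage of the updated build_input() fixture: the third tuple
# element drives the answer to the 'Will this host be RPM or Container based
# (rpm/container)?' prompt added in cli_installer.py.
from fixture import build_input  # hypothetical import path; the tests import from fixture.py

cli_input = build_input(
    hosts=[
        ('10.0.0.1', True, False),   # master, RPM-based  -> fixture answers 'rpm'
        ('10.0.0.2', False, True),   # node, containerized -> fixture answers 'container'
    ],
    add_nodes=[('10.0.0.3', False, False)],  # scale-up node, RPM-based
    ssh_user='root',
    variant_num=1,
    confirm_facts='y')
```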