-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README_AWS.md | 7
-rw-r--r--  README_openstack.md | 8
-rw-r--r--  filter_plugins/oo_filters.py | 23
-rw-r--r--  filter_plugins/openshift_master.py | 3
-rw-r--r--  inventory/byo/hosts.aep.example | 472
-rw-r--r--  inventory/byo/hosts.aep_quickstart | 20
-rw-r--r--  inventory/byo/hosts.origin.example | 63
-rw-r--r--  inventory/byo/hosts.ose.example | 59
-rw-r--r--  openshift-ansible.spec | 122
-rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml | 1
-rw-r--r--  playbooks/adhoc/metrics_setup/playbooks/install.yml | 9
-rw-r--r--  playbooks/adhoc/uninstall.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 5
l---------  playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh | 1
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 36
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 29
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/storage-pool.xml | 6
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 11
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 43
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 2
-rw-r--r--  roles/cockpit-ui/meta/main.yml | 13
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 44
-rw-r--r--  roles/flannel/README.md | 18
-rw-r--r--  roles/flannel/defaults/main.yaml | 7
-rw-r--r--  roles/flannel_register/README.md | 2
-rw-r--r--  roles/openshift_cloud_provider/tasks/main.yml | 8
-rw-r--r--  roles/openshift_cloud_provider/tasks/openstack.yml | 2
-rw-r--r--  roles/openshift_cloud_provider/templates/openstack.conf.j2 | 20
-rw-r--r--  roles/openshift_cloud_provider/vars/main.yml | 6
-rw-r--r--  roles/openshift_common/tasks/main.yml | 4
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 3
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/db-templates/README.md | 24
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml | 38
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml | 124
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml | 124
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json | 88
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json | 88
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json | 256
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 28
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml | 3
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 47
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 6
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 12
-rw-r--r--  roles/openshift_master/templates/master_docker/master.docker.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 6
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 26
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_facts/vars/main.yml | 7
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 31
-rw-r--r--  roles/openshift_metrics/tasks/install.yml | 114
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 125
-rw-r--r--  roles/openshift_metrics/vars/main.yaml | 15
-rw-r--r--  roles/openshift_node/tasks/main.yml | 12
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 8
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 2
-rw-r--r--  roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep | 0
-rw-r--r--  roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep | 0
-rw-r--r--  roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo | 8
-rw-r--r--  roles/openshift_repos/files/online/repos/enterprise-v3.repo | 10
-rw-r--r--  roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo | 11
-rw-r--r--  roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep | 0
-rw-r--r--  roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep | 0
-rw-r--r--  roles/openshift_repos/files/removed/repos/epel7-openshift.repo | 0
-rw-r--r--  roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo | 7
-rw-r--r--  roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo | 0
-rw-r--r--  roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo | 0
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 60
-rw-r--r--  roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml | 6
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 22
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 4
-rw-r--r--  utils/.gitignore | 3
-rw-r--r--  utils/Makefile | 83
-rw-r--r--  utils/README.md | 41
-rw-r--r--  utils/README.txt | 24
-rw-r--r--  utils/docs/config.md | 57
-rw-r--r--  utils/src/ooinstall/__init__.py | 4
-rw-r--r--  utils/src/ooinstall/ansible_plugins/facts_callback.py | 9
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 215
-rw-r--r--  utils/src/ooinstall/oo_config.py | 150
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 58
-rw-r--r--  utils/src/ooinstall/variants.py | 40
-rw-r--r--  utils/test-requirements.txt | 11
-rw-r--r--  utils/test/cli_installer_tests.py | 6
-rw-r--r--  utils/test/fixture.py | 1
-rw-r--r--  utils/test/oo_config_tests.py | 22
108 files changed, 1834 insertions(+), 1372 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 15e27417d..12d2214cc 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.3.12-1 ./
+3.4.1-1 ./
diff --git a/README_AWS.md b/README_AWS.md
index f3f98fed5..cccb122f6 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -135,7 +135,12 @@ Install Dependencies
--------------------
1. Ansible requires python-boto for aws operations:
-RHEL/CentOS/Fedora
+Fedora
+```
+ dnf install -y ansible python-boto pyOpenSSL
+```
+
+RHEL/CentOS
```
yum install -y ansible python-boto pyOpenSSL
```
diff --git a/README_openstack.md b/README_openstack.md
index e3cc7cc93..1998a5878 100644
--- a/README_openstack.md
+++ b/README_openstack.md
@@ -17,7 +17,12 @@ Install Dependencies
* `python-neutronclient`
* `python-heatclient`
-On RHEL / CentOS / Fedora:
+On Fedora:
+```
+ dnf install -y ansible python-novaclient python-neutronclient python-heatclient
+```
+
+On RHEL / CentOS:
```
yum install -y ansible python-novaclient python-neutronclient python-heatclient
```
@@ -43,6 +48,7 @@ The following options are used only by `heat_stack.yaml`. They are so used only
* `external_net` (default to `external`): Name of the external network to connect to
* `floating_ip_pool` (default to `external`): comma separated list of floating IP pools
* `ssh_from` (default to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
+* `node_port_from` (default to `0.0.0.0/0`): IPs authorized to connect to the services exposed via nodePort
Creating a cluster
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index b57bc0afa..a4dceb679 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -17,6 +17,7 @@ import re
import json
import yaml
from ansible.utils.unicode import to_unicode
+from urlparse import urlparse
# Disabling too-many-public-methods, since filter methods are necessarily
# public
@@ -709,7 +710,7 @@ class FilterModule(object):
fsType=filesystem,
volumeID=volume_id)))
persistent_volumes.append(persistent_volume)
- elif kind != 'object':
+ elif not (kind == 'object' or kind == 'dynamic'):
msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
kind,
component)
@@ -733,7 +734,8 @@ class FilterModule(object):
if 'storage' in hostvars['openshift']['hosted'][component]:
kind = hostvars['openshift']['hosted'][component]['storage']['kind']
create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv']
- if kind != None and create_pv:
+ create_pvc = hostvars['openshift']['hosted'][component]['storage']['create_pvc']
+ if kind != None and create_pv and create_pvc:
volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name']
size = hostvars['openshift']['hosted'][component]['storage']['volume']['size']
access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes']
@@ -829,6 +831,22 @@ class FilterModule(object):
return version
+ @staticmethod
+ def oo_hostname_from_url(url):
+ """ Returns the hostname contained in a URL
+
+ Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
+ """
+ if not isinstance(url, basestring):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ parse_result = urlparse(url)
+ if parse_result.netloc != '':
+ return parse_result.netloc
+ else:
+ # netloc wasn't parsed, assume url was missing scheme and path
+ return parse_result.path
+
+
def filters(self):
""" returns a mapping of filters to methods """
return {
@@ -859,5 +877,6 @@ class FilterModule(object):
"oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars,
"oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version,
"oo_merge_dicts": self.oo_merge_dicts,
+ "oo_hostname_from_url": self.oo_hostname_from_url,
"oo_merge_hostvars": self.oo_merge_hostvars,
}
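The new `oo_hostname_from_url` filter backs the metrics changes later in this patch (the changelog entry "[metrics] add filter to clean up hostname for use in metrics deployment"). A minimal usage sketch with illustrative URLs; note the fallback branch means scheme-less input comes back unchanged:

```yaml
# Hypothetical playbook exercising the new filter; URLs are illustrative.
- hosts: localhost
  gather_facts: no
  tasks:
    - debug:
        msg: "{{ 'https://ose3-master.example.com/v1/api' | oo_hostname_from_url }}"
      # -> ose3-master.example.com (netloc of the parsed URL)
    - debug:
        msg: "{{ 'ose3-master.example.com' | oo_hostname_from_url }}"
      # -> ose3-master.example.com (no scheme, so urlparse leaves netloc
      #    empty and the filter falls back to the path)
```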
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index d196d6c1f..ee6a62ba5 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -550,6 +550,9 @@ class FilterModule(object):
certs += ['openshift-master.crt',
'openshift-master.key',
'openshift-master.kubeconfig']
+ if bool(hostvars['openshift']['common']['version_gte_3_3_or_1_3']):
+ certs += ['service-signer.crt',
+ 'service-signer.key']
return certs
@staticmethod
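This tracks the service-serving-cert-signer enablement noted in the changelog below: on 3.3/1.3 and later, the signer's key pair is expected alongside the master certs. An illustrative task-level equivalent of the same version gate:

```yaml
# Hedged sketch; mirrors the guard added above, not code from this patch.
- name: Note the extra service-signer certs on 3.3 / 1.3 and later
  debug:
    msg: "also expecting service-signer.crt and service-signer.key"
  when: openshift.common.version_gte_3_3_or_1_3 | bool
```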
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example
deleted file mode 100644
index cff003a9c..000000000
--- a/inventory/byo/hosts.aep.example
+++ /dev/null
@@ -1,472 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-nfs
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# SSH user, this user should allow ssh based auth without requiring a
-# password. If using ssh key based auth, then the key should be managed by an
-# ssh agent.
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_become must be set to true and the
-# user must be configured for passwordless sudo
-#ansible_become=yes
-
-# Debug level for all Atomic Enterprise components (Defaults to 2)
-debug_level=2
-
-# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
-deployment_type=atomic-enterprise
-
-# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
-# rely on the version running on the first master. Works best for containerized installs where we can usually
-# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
-# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
-# release.
-openshift_release=v3.2
-
-# Specify an exact container image tag to install or configure.
-# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.2.0.46
-
-# Specify an exact rpm version to install or configure.
-# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.2.0.46
-
-# Install the openshift examples
-#openshift_install_examples=true
-
-# Configure logoutURL in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
-#openshift_master_logout_url=http://example.com
-
-# Configure extensionScripts in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
-
-# Configure extensionStylesheets in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_oauth_template=/path/to/login-template.html
-
-# Configure metricsPublicURL in the master config for cluster metrics
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
-
-# Configure loggingPublicURL in the master config for aggregate logging
-# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html
-#openshift_master_logging_public_url=https://kibana.example.com
-
-# Configure imagePolicyConfig in the master config
-# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
-#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
-
-# Docker Configuration
-# Add additional, insecure, and blocked registries to global docker configuration
-# For enterprise deployment types we ensure that registry.access.redhat.com is
-# included if you do not include it
-#openshift_docker_additional_registries=registry.example.com
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# Disable pushing to dockerhub
-#openshift_docker_disable_push_dockerhub=True
-# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
-# Default value: "--log-driver=json-file --log-opt max-size=50m"
-#openshift_docker_options="-l warn --ipv6=false"
-
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.10.3"
-
-# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
-# docker_upgrade=False
-
-# Alternate image format string. If you're not modifying the format string and
-# only need to inject your own registry you may want to consider
-# openshift_docker_additional_registries instead
-#oreg_url=example.com/aep3/aep-${component}:${version}
-# If oreg_url points to a registry other than registry.access.redhat.com we can
-# modify image streams to point at that registry by setting the following to true
-#openshift_examples_modify_imagestreams=True
-
-# Additional yum repos to install
-#openshift_additional_repos=[{'id': 'aep-devel', 'name': 'aep-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-
-# htpasswd auth
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
-# Defining htpasswd users
-#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
-# or
-#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
-
-# Allow all auth
-#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
-
-# LDAP auth
-#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
-# Configuring the ldap ca certificate
-#openshift_master_ldap_ca=<ca text>
-# or
-#openshift_master_ldap_ca_file=<path to local ca file to use>
-
-# Available variables for configuring certificates for other identity providers:
-#openshift_master_openid_ca
-#openshift_master_openid_ca_file
-#openshift_master_request_header_ca
-#openshift_master_request_header_ca_file
-
-# Cloud Provider Configuration
-#
-# Note: You may make use of environment variables rather than store
-# sensitive configuration within the ansible inventory.
-# For example:
-#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
-#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
-#
-# AWS
-#openshift_cloudprovider_kind=aws
-# Note: IAM profiles may be used instead of storing API credentials on disk.
-#openshift_cloudprovider_aws_access_key=aws_access_key_id
-#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
-#
-# Openstack
-#openshift_cloudprovider_kind=openstack
-#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
-#openshift_cloudprovider_openstack_username=username
-#openshift_cloudprovider_openstack_password=password
-#openshift_cloudprovider_openstack_tenant_id=tenant_id
-#openshift_cloudprovider_openstack_tenant_name=tenant_name
-#openshift_cloudprovider_openstack_region=region
-#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
-
-# Project Configuration
-#osm_project_request_message=''
-#osm_project_request_template=''
-#osm_mcs_allocator_range='s0:/2'
-#osm_mcs_labels_per_project=5
-#osm_uid_allocator_range='1000000000-1999999999/10000'
-
-# Configure additional projects
-#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
-
-# Enable cockpit
-#osm_use_cockpit=true
-#
-# Set cockpit plugins
-#osm_cockpit_plugins=['cockpit-kubernetes']
-
-# Native high availability cluster method with optional load balancer.
-# If no lb group is defined, the installer assumes that a load balancer has
-# been preconfigured. For installation the value of
-# openshift_master_cluster_hostname must resolve to the load balancer
-# or to one or all of the masters defined in the inventory if no load
-# balancer is present.
-#openshift_master_cluster_method=native
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Pacemaker high availability cluster method.
-# Pacemaker HA environment must be able to self provision the
-# configured VIP. For installation openshift_master_cluster_hostname
-# must resolve to the configured VIP.
-#openshift_master_cluster_method=pacemaker
-#openshift_master_cluster_password=openshift_cluster
-#openshift_master_cluster_vip=192.168.133.25
-#openshift_master_cluster_public_vip=192.168.133.25
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Override the default controller lease ttl
-#osm_controller_lease_ttl=30
-
-# Configure controller arguments
-#osm_controller_args={'resource-quota-sync-period': ['10s']}
-
-# Configure api server arguments
-#osm_api_server_args={'max-requests-inflight': ['400']}
-
-# default subdomain to use for exposed routes
-#openshift_master_default_subdomain=apps.test.example.com
-
-# additional cors origins
-#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
-
-# default project node selector
-#osm_default_node_selector='region=primary'
-
-# Override the default pod eviction timeout
-#openshift_master_pod_eviction_timeout=5m
-
-# Override the default oauth tokenConfig settings:
-# openshift_master_access_token_max_seconds=86400
-# openshift_master_auth_token_max_seconds=500
-
-# Override master servingInfo.maxRequestsInFlight
-#openshift_master_max_requests_inflight=500
-
-# default storage plugin dependencies to install, by default the ceph and
-# glusterfs plugin dependencies will be installed, if available.
-#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
-
-# OpenShift Router Options
-#
-# An OpenShift router will be created during install if there are
-# nodes present with labels matching the default router selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Router selector (optional)
-# Router will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_router_selector='region=infra'
-#
-# Router replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift router selector.
-#openshift_hosted_router_replicas=2
-#
-# Router force subdomain (optional)
-# A router path format to force on all routes used by this router
-# (will ignore the route host value)
-#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
-#
-# Router certificate (optional)
-# Provide local certificate paths which will be configured as the
-# router's default certificate.
-#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
-
-# Openshift Registry Options
-#
-# An OpenShift registry will be created during install if there are
-# nodes present with labels matching the default registry selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Registry selector (optional)
-# Registry will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_registry_selector='region=infra'
-#
-# Registry replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift registry selector.
-#openshift_hosted_registry_replicas=2
-
-# Registry Storage Options
-#
-# NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=nfs.example.com
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# Openstack
-# Volume must already exist.
-#openshift_hosted_registry_storage_kind=openstack
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_openstack_filesystem=ext4
-#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# AWS S3
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# Configure SDN cluster network and kubernetes service CIDR blocks. These
-# network blocks should be private and should not conflict with network blocks
-# in your infrastructure that pods may require access to. Can not be changed
-# after deployment.
-#osm_cluster_network_cidr=10.1.0.0/16
-#openshift_portal_net=172.30.0.0/16
-
-# Configure number of bits to allocate to each host’s subnet e.g. 8
-# would mean a /24 network on the host.
-#osm_host_subnet_length=8
-
-# Configure master API and console ports.
-#openshift_master_api_port=8443
-#openshift_master_console_port=8443
-
-# set RPM version for debugging purposes
-#openshift_pkg_version=-3.1.0.0
-
-# Configure custom named certificates
-# NOTE: openshift_master_named_certificates is cached on masters and is an
-# additive fact, meaning that each run with a different set of certificates
-# will add the newly provided certificates to the cached set of certificates.
-#
-# An optional CA may be specified for each named certificate. CAs will
-# be added to the OpenShift CA bundle which allows for the named
-# certificate to be served for internal cluster communication.
-#
-# If you would like openshift_master_named_certificates to be overwritten with
-# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates=true
-#
-# Provide local certificate paths which will be deployed to masters
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Detected names may be overridden by specifying the "names" key
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
-
-# Session options
-#openshift_master_session_name=ssn
-#openshift_master_session_max_seconds=3600
-
-# An authentication and encryption secret will be generated if secrets
-# are not provided. If provided, openshift_master_session_auth_secrets
-# and openshift_master_encryption_secrets must be equal length.
-#
-# Signing secrets, used to authenticate sessions using
-# HMAC. Recommended to use secrets with 32 or 64 bytes.
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-#
-# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
-# characters long, to select AES-128, AES-192, or AES-256.
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-
-# configure how often node iptables rules are refreshed
-#openshift_node_iptables_sync_period=5s
-
-# Configure nodeIP in the node config
-# This is needed in cases where node traffic is desired to go over an
-# interface other than the default network interface.
-#openshift_node_set_node_ip=True
-
-# Force setting of system hostname when configuring OpenShift
-# This works around issues related to installations that do not have valid dns
-# entries for the interfaces attached to the host.
-#openshift_set_hostname=True
-
-# Configure dnsIP in the node config
-#openshift_dns_ip=172.30.0.1
-
-# Configure node kubelet arguments
-#openshift_node_kubelet_args={'max-pods': ['110'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
-
-# Configure logrotate scripts
-# See: https://github.com/nickhammond/ansible-logrotate
-#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-
-# openshift-ansible will wait indefinitely for your input when it detects that the
-# value of openshift_hostname resolves to an IP address not bound to any local
-# interfaces. This mis-configuration is problematic for any pod leveraging host
-# networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
-
-# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
-# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
-# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
-# be used with 1.0 and 3.0.
-# openshift_use_dnsmasq=False
-
-# Global Proxy Configuration
-# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
-# variables for docker and master services.
-#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
-#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
-#openshift_no_proxy='.hosts.example.com,some-host.com'
-#
-# Most environments don't require a proxy between openshift masters, nodes, and
-# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
-# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
-#openshift_generate_no_proxy_hosts=True
-#
-# These options configure the BuildDefaults admission controller which injects
-# environment variables into Builds. These values will default to the global proxy
-# config values. You only need to set these if they differ from the global settings
-# above. See BuildDefaults
-# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=build_defaults
-#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
-
-# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
-#openshift_master_dynamic_provisioning_enabled=False
-
-# Configure usage of openshift_clock role.
-#openshift_clock_enabled=true
-
-# OpenShift Per-Service Environment Variables
-# Environment variables are added to /etc/sysconfig files for
-# each OpenShift service: node, master (api and controllers).
-# API and controllers environment variables are merged in single
-# master environments.
-#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-
-# host group for masters
-[masters]
-aep3-master[1:3]-ansible.test.example.com
-
-[etcd]
-aep3-etcd[1:3]-ansible.test.example.com
-
-[lb]
-aep3-lb-ansible.test.example.com
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
-[nodes]
-aep3-master[1:3]-ansible.test.example.com
-aep3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.aep_quickstart b/inventory/byo/hosts.aep_quickstart
deleted file mode 100644
index 46ea3a03f..000000000
--- a/inventory/byo/hosts.aep_quickstart
+++ /dev/null
@@ -1,20 +0,0 @@
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-
-[OSEv3:vars]
-ansible_ssh_user=root
-deployment_type=atomic-enterprise
-osm_use_cockpit=true
-
-[masters]
-ose3-master.example.com
-
-[nodes]
-ose3-master.example.com openshift_scheduleable=True
-
-[etcd]
-
-[lb]
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 8e7883f3b..4124ecb31 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -65,10 +65,6 @@ openshift_release=v1.2
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html
-# Configure metricsPublicURL in the master config for cluster metrics
-# See: https://docs.openshift.org/latest/install_config/cluster_metrics.html
-#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
-
# Configure loggingPublicURL in the master config for aggregate logging
# See: https://docs.openshift.org/latest/install_config/aggregate_logging.html
#openshift_master_logging_public_url=https://kibana.example.com
@@ -109,10 +105,6 @@ openshift_release=v1.2
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-# Origin Fedora copr repo
-# Use this if you are installing on Fedora
-#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}]
-
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
@@ -330,6 +322,49 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_pullthrough=true
+# Metrics deployment
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#
+# By default metrics are not automatically deployed, set this to enable them
+# openshift_hosted_metrics_deploy=true
+#
+# Storage Options
+# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
+# Storage options A & B currently support only one cassandra pod which is
+# generally enough for up to 1000 pods. Additional volumes can be created
+# manually after the fact and metrics scaled per the docs.
+#
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_host=nfs.example.com
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_metrics_storage_kind=dynamic
+#
+# Override metricsPublicURL in the master config for cluster metrics
+# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
+#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -343,6 +378,16 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#osm_cluster_network_cidr=10.1.0.0/16
#openshift_portal_net=172.30.0.0/16
+
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
#osm_host_subnet_length=8
@@ -420,7 +465,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments
-#openshift_node_kubelet_args={'max-pods': ['110'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
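Taken together, the metrics options added above can be expressed as a small vars file. A hypothetical `group_vars/OSEv3.yml` for the dynamic-provisioning case (Option C), leaving `metricsPublicURL` to its `openshift_master_default_subdomain` default:

```yaml
# Hypothetical group_vars/OSEv3.yml equivalent of the inventory lines above.
openshift_hosted_metrics_deploy: true
openshift_hosted_metrics_storage_kind: dynamic  # 'dynamic' is accepted by the
                                                # updated oo_persistent_volumes
                                                # filter in this patch
```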
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 0d358146c..b617d2a03 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -65,10 +65,6 @@ openshift_release=v3.2
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html
-# Configure metricsPublicURL in the master config for cluster metrics
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
-
# Configure loggingPublicURL in the master config for aggregate logging
# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html
#openshift_master_logging_public_url=https://kibana.example.com
@@ -325,6 +321,50 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_pullthrough=true
+# Metrics deployment
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#
+# By default metrics are not automatically deployed, set this to enable them
+# openshift_hosted_metrics_deploy=true
+#
+# Storage Options
+# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
+# Storage options A & B currently support only one cassandra pod which is
+# generally enough for up to 1000 pods. Additional volumes can be created
+# manually after the fact and metrics scaled per the docs.
+#
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_host=nfs.example.com
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_metrics_storage_kind=dynamic
+#
+# Override metricsPublicURL in the master config for cluster metrics
+# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
+#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+
+
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -339,6 +379,15 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_portal_net=172.30.0.0/16
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
#osm_host_subnet_length=8
@@ -416,7 +465,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments
-#openshift_node_kubelet_args={'max-pods': ['110'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 976bbeeb6..5042743f8 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.3.12
+Version: 3.4.1
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -221,6 +221,126 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Sep 01 2016 Scott Dodson <sdodson@redhat.com> 3.4.1-1
+- Bump to 3.4.0
+
+* Wed Aug 31 2016 Scott Dodson <sdodson@redhat.com> 3.3.20-1
+- Restore network plugin configuration (sdodson@redhat.com)
+- Remove openshift_master_metrics_public_url (abutcher@redhat.com)
+- Bug 1371836 - The variant should be Registry 3.3 (smunilla@redhat.com)
+
+* Wed Aug 31 2016 Troy Dawson <tdawson@redhat.com> 3.3.19-1
+- update flannel_subnet_len default value (mkumatag@in.ibm.com)
+- Reload docker facts after upgrading docker (sdodson@redhat.com)
+
+* Tue Aug 30 2016 Scott Dodson <sdodson@redhat.com> 3.3.18-1
+- Enable dynamic storage (sdodson@redhat.com)
+- Change how we set master's metricsPublicURL (sdodson@redhat.com)
+- update kubelet argument example with references to new pods-per-core and new
+ max-pods threshold for 3.3 (jeder@redhat.com)
+- update kubelet argument example with references to new pods-per-core and new
+ max-pods threshold for 3.3 (jeder@redhat.com)
+
+* Mon Aug 29 2016 Scott Dodson <sdodson@redhat.com> 3.3.17-1
+- Reload units after node container service modified. (dgoodwin@redhat.com)
+- Fix flannel check (mkumatag@in.ibm.com)
+- Default to port 80 when deploying cockpit-ui (smunilla@redhat.com)
+- Set cloudprovider kind with openshift_facts. (abutcher@redhat.com)
+- Fix openstack cloudprovider template conditional. (abutcher@redhat.com)
+
+* Sat Aug 27 2016 Scott Dodson <sdodson@redhat.com> 3.3.16-1
+- Sync image stream data (sdodson@redhat.com)
+- Update metrics example inventories (sdodson@redhat.com)
+- Preserve AWS options in sysconfig files. (dgoodwin@redhat.com)
+- Fix metrics for containerized installs (sdodson@redhat.com)
+- Cleanup items botched during rebase (sdodson@redhat.com)
+- add check for server and account already exist (mangirdas@judeikis.lt)
+- add run_once to repeatable actions (mangirdas@judeikis.lt)
+- Remove atomic check and cockpit.socket (smunilla@redhat.com)
+- Re-organize registry-console deployment. (abutcher@redhat.com)
+- Add registry console template (aweiteka@redhat.com)
+- Add support for Atomic Registry Installs (smunilla@redhat.com)
+- Apply indentation changes to some other lines (tbielawa@redhat.com)
+- Don't use openshift_env for cloud provider facts. (abutcher@redhat.com)
+- Enable PEP8 tests by default in the 'make ci' target now
+ (tbielawa@redhat.com)
+- Fix PEP8 errors in cli_installer.py (tbielawa@redhat.com)
+- Fix PEP8 in openshift_ansible.py (tbielawa@redhat.com)
+- Fix PEP8 in oo_config.py (tbielawa@redhat.com)
+- Fix PEP8 in variants.py (tbielawa@redhat.com)
+- Fix PEP8 in facts_callback.py (tbielawa@redhat.com)
+- fix duplicate src field (jdetiber@redhat.com)
+- Refactor volume directory creation (sdodson@redhat.com)
+- Rely on IMAGE_PREFIX and IMAGE_VERSION defaults from the templates themselves
+ (sdodson@redhat.com)
+- Add metrics exports to nfs role, move exports to /etc/exports.d/openshift-
+ ansible.exports (sdodson@redhat.com)
+- Add ability to disable pvc creation (sdodson@redhat.com)
+- Fix registry volume (sdodson@redhat.com)
+- add selectors for metrics and logging (sdodson@redhat.com)
+- Add logic to detect existing installs (sdodson@redhat.com)
+- Deploy metrics after our router (sdodson@redhat.com)
+- Add Enterprise 3.3 template (sdodson@redhat.com)
+- Pull in keynote demo changes (sdodson@redhat.com)
+- [tags] add some support for running a subset of config via tags
+ (jdetiber@redhat.com)
+- [metrics] add filter to clean up hostname for use in metrics deployment
+ (jdetiber@redhat.com)
+- enable service-serving-cert-signer by default (abutcher@redhat.com)
+- Fix review comments (mkumatag@in.ibm.com)
+- Remove duplicate flannel registration (mkumatag@in.ibm.com)
+
+* Wed Aug 24 2016 Scott Dodson <sdodson@redhat.com> 3.3.15-1
+- simplify repo configuration (jdetiber@redhat.com)
+- don't set virt_sandbox_use_nfs on Fedora, it was replaced by virt_use_nfs
+ (maxamillion@fedoraproject.org)
+- Correct flannel cert variables. (abutcher@redhat.com)
+- Make note about ansible/install logs messing up ci tests
+ (tbielawa@redhat.com)
+- remove fedora origin copr (it's in mainline fedora now), some dnf/yum clean
+ up (maxamillion@fedoraproject.org)
+- Move nested print_read_config_error function into it's own function
+ (tbielawa@redhat.com)
+- Makefile includes ci-pyflakes target now (tbielawa@redhat.com)
+- Fix BZ1368296 by quietly recollecting facts if the cache is removed
+ (tbielawa@redhat.com)
+- Correct masterCA config typo. (abutcher@redhat.com)
+- don't gather facts when bootstrapping ansible for Fedora hosts
+ (maxamillion@fedoraproject.org)
+- a-o-i: Add variant and variant_version to migration (smunilla@redhat.com)
+- Fix upgrade failure when master-config does not have pluginOrderOverride.
+ (dgoodwin@redhat.com)
+- Add externalIPNetworkCIDRs to config (smunilla@redhat.com)
+
+* Tue Aug 23 2016 Scott Dodson <sdodson@redhat.com> 3.3.14-1
+- a-o-i: Fix ansible_ssh_user question (smunilla@redhat.com)
+- Don't run node config upgrade hook if host is not a node.
+ (dgoodwin@redhat.com)
+- Link ca to ca-bundle when ca-bundle does not exist. (abutcher@redhat.com)
+- Better error if no OpenShift RPMs are available. (dgoodwin@redhat.com)
+- Revert "Due to problems with with_fileglob lets avoid using it for now"
+ (sdodson@redhat.com)
+- Replace some virsh commands by native virt_XXX ansible module
+ (lhuard@amadeus.com)
+- Add warning at end of 3.3 upgrade if pluginOrderOverride is found.
+ (dgoodwin@redhat.com)
+- a-o-i: Remove Legacy Config Upgrade (smunilla@redhat.com)
+- Fix etcd uninstall (sdodson@redhat.com)
+- Bug 1358951 - Error loading config, no such key: 'deployment' when using
+ previously valid answers file (smunilla@redhat.com)
+- Fix standalone Docker upgrade missing symlink. (dgoodwin@redhat.com)
+- Open OpenStack security group for the service node port range
+ (lhuard@amadeus.com)
+- Fix the “node on master” feature (lhuard@amadeus.com)
+- Due to problems with with_fileglob lets avoid using it for now
+ (sdodson@redhat.com)
+
+* Fri Aug 19 2016 Troy Dawson <tdawson@redhat.com> 3.3.13-1
+- Fix warnings in OpenStack provider with ansible 2.1 (lhuard@amadeus.com)
+- Mount /sys rw (sdodson@redhat.com)
+- Update uninstall.yml (sdodson@redhat.com)
+- Fix padding on registry config (sdodson@redhat.com)
+
* Wed Aug 17 2016 Troy Dawson <tdawson@redhat.com> 3.3.12-1
- Fixes to typos, grammar, and product branding in cli_installer
(tpoitras@redhat.com)
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
index 5ca383a37..b370d7fba 100644
--- a/playbooks/adhoc/bootstrap-fedora.yml
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -1,4 +1,5 @@
- hosts: OSEv3
+ gather_facts: false
tasks:
- name: install python and deps for ansible modules
raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python python2-firewall pyOpenSSL python-cryptography
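Disabling fact gathering here matters because the setup module itself needs Python on the target, which is exactly what this play installs. A minimal sketch of the pattern (host group and package list illustrative):

```yaml
# Bootstrap pattern: raw runs over plain SSH, so it works before Python
# (and therefore before gather_facts) is available on the host.
- hosts: new_fedora_hosts
  gather_facts: false
  tasks:
    - raw: dnf install -y python2 python2-dnf
```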
diff --git a/playbooks/adhoc/metrics_setup/playbooks/install.yml b/playbooks/adhoc/metrics_setup/playbooks/install.yml
index 235f775ef..a9ec3c1ef 100644
--- a/playbooks/adhoc/metrics_setup/playbooks/install.yml
+++ b/playbooks/adhoc/metrics_setup/playbooks/install.yml
@@ -16,21 +16,30 @@
- name: "Add metrics-deployer"
command: "{{item}}"
+ run_once: true
+ register: output
+ failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
with_items:
- oc project openshift-infra
- oc create -f /tmp/metrics-deployer-setup.yaml
- name: "Give metrics-deployer SA permissions"
command: "oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer"
+ run_once: true
- name: "Give heapster SA permissions"
command: "oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster"
+ run_once: true
- name: "Create metrics-deployer secret"
command: "oc secrets new metrics-deployer nothing=/dev/null"
+ register: output
+ failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
+ run_once: true
- name: "Copy metrics.yaml to remote"
copy: "src=../files/metrics.yaml dest=/tmp/metrics.yaml force=yes"
- name: "Process yml template"
shell: "oc process -f /tmp/metrics.yaml -v MASTER_URL={{ masterPublicURL }},REDEPLOY=true,HAWKULAR_METRICS_HOSTNAME={{ metrics_external_service }},IMAGE_PREFIX={{ metrics_image_prefix }},IMAGE_VERSION={{ metrics_image_version }},USE_PERSISTENT_STORAGE=false | oc create -f -"
+ run_once: true
\ No newline at end of file
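The `register`/`failed_when` pairing above is this patch's idempotence idiom: `oc create` exits non-zero on re-runs, so "already exists" in stderr is downgraded to success. A generic sketch of the same guard (the resource file path is illustrative):

```yaml
# Re-runnable "create if absent" guard; /tmp/example.yaml is illustrative.
- name: Create a resource that may already exist
  command: oc create -f /tmp/example.yaml
  register: output
  failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
  run_once: true
```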
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 4edd44fe4..3be3a0e96 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -338,7 +338,10 @@
- /etc/ansible/facts.d/openshift.fact
- /etc/etcd
- /etc/systemd/system/etcd_container.service
- - /var/lib/etcd
+
+ - name: Remove etcd data
+ shell: rm -rf /var/lib/etcd/*
+ failed_when: false
- hosts: lb
become: yes
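The switch from deleting `/var/lib/etcd` to clearing its contents is deliberate: the directory is commonly a dedicated mount that cannot be unlinked, while the wildcard removal with `failed_when: false` empties it and keeps the uninstall re-runnable. A hedged variant of the same task, adding a guard so it only runs when the path exists:

```yaml
# Sketch only; `removes` makes the shell task a no-op if /var/lib/etcd
# is absent, on top of ignoring failures as the patch does.
- name: Remove etcd data
  shell: rm -rf /var/lib/etcd/*
  args:
    removes: /var/lib/etcd
  failed_when: false
```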
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 0a931fbe0..0b85b2485 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -5,6 +5,8 @@
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- add_host:
@@ -14,6 +16,8 @@
- hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
@@ -22,3 +26,4 @@
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
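`deployment_subtype` is what later plays in this patch consult, e.g. skipping cockpit and deploying cockpit-ui when it equals `registry`. A hypothetical `group_vars/OSEv3.yml` showing it set for a registry-only install:

```yaml
# Hypothetical group_vars/OSEv3.yml; values illustrative.
deployment_type: openshift-enterprise
deployment_subtype: registry  # default(none) leaves this unset otherwise
```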
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
new file mode 120000
index 000000000..d5d864b63
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
@@ -0,0 +1 @@
+../../../../common/openshift-cluster/upgrades/files/nuke_images.sh
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index e9fb3de96..26b31d313 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -1,11 +1,3 @@
-- name: Configure flannel
- hosts: oo_first_master
- vars:
- etcd_urls: "{{ openshift.master.etcd_urls }}"
- roles:
- - role: flannel_register
- when: openshift.common.use_flannel | bool
-
- name: Additional master configuration
hosts: oo_first_master
vars:
@@ -23,8 +15,6 @@
when: openshift.common.use_manageiq | bool
- role: cockpit
when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
- (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+ (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- role: flannel_register
when: openshift.common.use_flannel | bool
-
-
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 5cf5df08e..d6a99fcda 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,14 +1,22 @@
---
- include: evaluate_groups.yml
+ tags:
+ - always
- include: initialize_facts.yml
+ tags:
+ - always
- include: validate_hostnames.yml
+ tags:
+ - node
- include: initialize_openshift_version.yml
- name: Set oo_options
hosts: oo_all_hosts
+ tags:
+ - always
tasks:
- set_fact:
openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
@@ -30,15 +38,29 @@
when: openshift_docker_log_options is not defined
- include: ../openshift-etcd/config.yml
+ tags:
+ - etcd
- include: ../openshift-nfs/config.yml
+ tags:
+ - nfs
- include: ../openshift-loadbalancer/config.yml
+ tags:
+ - loadbalancer
- include: ../openshift-master/config.yml
+ tags:
+ - master
- include: additional_config.yml
+ tags:
+ - master
- include: ../openshift-node/config.yml
+ tags:
+ - node
- include: openshift_hosted.yml
+ tags:
+ - hosted
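
Tagging each include lets an operator re-run a single phase rather than the whole cluster config, e.g. `ansible-playbook playbooks/byo/openshift-cluster/config.yml --tags "etcd,master"` (invocation illustrative), while the plays tagged always guarantee that group evaluation and fact initialization still run on every invocation.
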
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 3fb42a7fa..b3e02fb97 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -77,7 +77,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
- when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
+ when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
- name: Evaluate oo_first_etcd
add_host:
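
The rewritten condition also dodges a type trap: inventory values often arrive as strings, and the old `g_nodeonmaster | default(false) == true` is never true when the variable is the string "true", whereas `| bool` normalizes strings, booleans, and 1/0 alike. A tiny illustration with a hypothetical flag:

- debug: msg="runs whether some_flag is true, 'true', or '1'"
  when: some_flag | default(false) | bool
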
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 4d4a09828..f65b7a2cd 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,5 +1,8 @@
+---
- name: Create persistent volumes
hosts: oo_first_master
+ tags:
+ - hosted
vars:
persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
@@ -9,6 +12,8 @@
- name: Create Hosted Resources
hosts: oo_first_master
+ tags:
+ - hosted
pre_tasks:
- set_fact:
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
@@ -36,6 +41,8 @@
openshift_serviceaccounts_sccs:
- privileged
when: not openshift.common.version_gte_3_2_or_1_2
+ - role: openshift_hosted
- role: openshift_metrics
when: openshift.hosted.metrics.deploy | bool
- - role: openshift_hosted
+ - role: cockpit-ui
+ when: ( openshift.common.deployment_subtype == 'registry' )
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 03e7b844c..417096dd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -39,6 +39,10 @@
- service: name=docker state=started
+- name: Update docker facts
+ openshift_facts:
+ role: docker
+
- name: Restart containerized services
service: name={{ item }} state=started
with_items:
diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post.yml
index bd97d0b34..e43954453 100644
--- a/playbooks/common/openshift-cluster/upgrades/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post.yml
@@ -56,3 +56,17 @@
{{ oc_cmd }} patch dc/docker-registry -n default -p
'{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
--api-version=v1
+
+# Check for warnings to be printed at the end of the upgrade:
+- name: Check for warnings
+ hosts: oo_masters_to_config
+ tasks:
+ # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
+ - command: >
+ grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
+ register: grep_plugin_order_override
+ when: openshift.common.version_gte_3_3_or_1_3 | bool
+ failed_when: false
+ - name: Warn if pluginOrderOverride is in use in master-config.yaml
+ debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
+ when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0
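
The grep/debug pair is a reusable check-and-warn idiom: failed_when: false lets the probe run without aborting the play, and the `| skipped` guard protects the debug task when the probe never ran (pre-3.3 masters here, where the when condition skips the grep). A minimal sketch with a hypothetical deprecated key and an illustrative config path:

- command: grep someDeprecatedKey /etc/origin/master/master-config.yaml
  register: grep_deprecated
  failed_when: false
  changed_when: false
- debug: msg="WARNING someDeprecatedKey is deprecated"
  when: grep_deprecated.rc == 0
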
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
index f7ff16fb8..be4e02c4a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -188,7 +188,7 @@
- include: docker/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- include: "{{ node_config_hook }}"
- when: node_config_hook is defined
+ when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
- include: rpm_upgrade.yml
vars:
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1d818eea0..7f60cd9e4 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -48,6 +48,12 @@
- set_fact:
openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
when: openshift_hosted_metrics_resolution is not defined
+ - set_fact:
+ openshift_hosted_metrics_deployer_prefix: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_prefix') | default('openshift') }}"
+ when: openshift_hosted_metrics_deployer_prefix is not defined
+ - set_fact:
+ openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}"
+ when: openshift_hosted_metrics_deployer_version is not defined
roles:
- openshift_facts
post_tasks:
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
index 3117d9edc..b42ca83af 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -1,27 +1,11 @@
---
-- name: Test if libvirt network for openshift already exists
- command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
- register: net_info_result
- changed_when: False
- failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr"
-
-- name: Create a temp directory for the template xml file
- command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
- register: mktemp
- when: net_info_result.rc == 1
-
-- name: Create network xml file
- template:
- src: templates/network.xml
- dest: "{{ mktemp.stdout }}/network.xml"
- when: net_info_result.rc == 1
-
-- name: Create libvirt network for openshift
- command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
- when: net_info_result.rc == 1
-
-- name: Remove the temp directory
- file:
- path: "{{ mktemp.stdout }}"
- state: absent
- when: net_info_result.rc == 1
+- name: Create the libvirt network for OpenShift
+ virt_net:
+ name: '{{ libvirt_network }}'
+ state: '{{ item }}'
+ autostart: 'yes'
+ xml: "{{ lookup('template', 'network.xml') }}"
+ uri: '{{ libvirt_uri }}'
+ with_items:
+ - present
+ - active
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
index 397158b9e..8685624ec 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -6,22 +6,25 @@
# We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set.
- acl:
- default: "{{ item }}"
+ default: '{{ item.default }}'
entity: kvm
etype: group
name: "{{ libvirt_storage_pool_path }}"
- permissions: rwx
+ permissions: '{{ item.permissions }}'
state: present
with_items:
- - no
- - yes
+ - default: no
+ permissions: x
+ - default: yes
+ permissions: rwx
-- name: Test if libvirt storage pool for openshift already exists
- command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
- register: pool_info_result
- changed_when: False
- failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr"
-
-- name: Create the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
- when: pool_info_result.rc == 1
+- name: Create the libvirt storage pool for OpenShift
+ virt_pool:
+ name: '{{ libvirt_storage_pool }}'
+ state: '{{ item }}'
+ autostart: 'yes'
+ xml: "{{ lookup('template', 'storage-pool.xml') }}"
+ uri: '{{ libvirt_uri }}'
+ with_items:
+ - present
+ - active
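
Swapping the virsh shell-outs for the virt_net and virt_pool modules drops the fragile rc/stderr sniffing and the temp-file dance: the modules are idempotent, so looping one task over present and then active both defines the object (with autostart) and ensures it is running. The same idiom restated for the network, with an illustrative name and URI:

- virt_net:
    name: example-net
    state: '{{ item }}'
    autostart: 'yes'
    xml: "{{ lookup('template', 'network.xml') }}"
    uri: 'qemu:///system'
  with_items:
  - present
  - active
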
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index cc34d0ef9..e0afc43ba 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -134,4 +134,4 @@
delay: 1
with_together:
- '{{ instances }}'
- - '{{ ips }}'
\ No newline at end of file
+ - '{{ ips }}'
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
index 050bc7ab9..0ce2a8342 100644
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -1,5 +1,5 @@
<network>
- <name>openshift-ansible</name>
+ <name>{{ libvirt_network }}</name>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
new file mode 100644
index 000000000..da139afd0
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
@@ -0,0 +1,6 @@
+<pool type='dir'>
+ <name>{{ libvirt_storage_pool }}</name>
+ <target>
+ <path>{{ libvirt_storage_pool_path }}</path>
+ </target>
+</pool>
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 2d0098784..458cf5ac7 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -42,6 +42,12 @@ parameters:
description: Source of legitimate ssh connections
default: 0.0.0.0/0
+ node_port_incoming:
+ type: string
+ label: Source of node port connections
+ description: Authorized sources targeting node ports
+ default: 0.0.0.0/0
+
num_etcd:
type: number
label: Number of etcd nodes
@@ -393,6 +399,11 @@ resources:
port_range_min: 4789
port_range_max: 4789
remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 30000
+ port_range_max: 32767
+ remote_ip_prefix: { get_param: node_port_incoming }
infra-secgrp:
type: OS::Neutron::SecurityGroup
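
The 30000-32767 range in the new ingress rule is Kubernetes' default NodePort service range, so node_port_incoming effectively controls which sources may reach NodePort services on cluster hosts; the 0.0.0.0/0 default preserves the previously open behavior.
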
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index b9aae2f4c..5cf543204 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -33,6 +33,7 @@
-P external_net={{ openstack_network_external_net }}
-P ssh_public_key="{{ openstack_ssh_public_key }}"
-P ssh_incoming={{ openstack_ssh_access_from }}
+ -P node_port_incoming={{ openstack_node_port_access_from }}
-P num_etcd={{ num_etcd }}
-P num_masters={{ num_masters }}
-P num_nodes={{ num_nodes }}
@@ -48,6 +49,8 @@
-P infra_flavor={{ openstack_flavor["infra"] }}
-P dns_flavor={{ openstack_flavor["dns"] }}
openshift-ansible-{{ cluster_id }}-stack'
+ args:
+ chdir: '{{ playbook_dir }}'
- name: Wait for OpenStack Stack readiness
shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
@@ -107,9 +110,9 @@
openshift_node_labels:
type: "etcd"
with_together:
- - parsed_outputs.etcd_names
- - parsed_outputs.etcd_ips
- - parsed_outputs.etcd_floating_ips
+ - '{{ parsed_outputs.etcd_names }}'
+ - '{{ parsed_outputs.etcd_ips }}'
+ - '{{ parsed_outputs.etcd_floating_ips }}'
- name: Add new master instances groups and variables
add_host:
@@ -121,9 +124,9 @@
openshift_node_labels:
type: "master"
with_together:
- - parsed_outputs.master_names
- - parsed_outputs.master_ips
- - parsed_outputs.master_floating_ips
+ - '{{ parsed_outputs.master_names }}'
+ - '{{ parsed_outputs.master_ips }}'
+ - '{{ parsed_outputs.master_floating_ips }}'
- name: Add new node instances groups and variables
add_host:
@@ -135,9 +138,9 @@
openshift_node_labels:
type: "compute"
with_together:
- - parsed_outputs.node_names
- - parsed_outputs.node_ips
- - parsed_outputs.node_floating_ips
+ - '{{ parsed_outputs.node_names }}'
+ - '{{ parsed_outputs.node_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
- name: Add new infra instances groups and variables
add_host:
@@ -149,9 +152,9 @@
openshift_node_labels:
type: "infra"
with_together:
- - parsed_outputs.infra_names
- - parsed_outputs.infra_ips
- - parsed_outputs.infra_floating_ips
+ - '{{ parsed_outputs.infra_names }}'
+ - '{{ parsed_outputs.infra_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
- name: Add DNS groups and variables
add_host:
@@ -166,10 +169,10 @@
host: '{{ item }}'
port: 22
with_flattened:
- - parsed_outputs.master_floating_ips
- - parsed_outputs.node_floating_ips
- - parsed_outputs.infra_floating_ips
- - parsed_outputs.dns_floating_ip
+ - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.dns_floating_ip }}'
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -178,10 +181,10 @@
retries: 30
delay: 1
with_flattened:
- - parsed_outputs.master_floating_ips
- - parsed_outputs.node_floating_ips
- - parsed_outputs.infra_floating_ips
- - parsed_outputs.dns_floating_ip
+ - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.dns_floating_ip }}'
- include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index ba9c6bf9c..60372e262 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -17,7 +17,7 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+ with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- name: List Hosts
hosts: oo_list_hosts
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index 5bd8476f1..980ab7337 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([]))
+ with_items: "{{ (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([])) }}"
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index bc53a51b0..17063ef34 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -12,6 +12,8 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k
default('~/.ssh/id_rsa.pub', True)) }}"
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
+openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
+ default('0.0.0.0/0', True) }}"
openstack_flavor:
dns: "{{ lookup('oo_option', 'dns_flavor' ) | default('m1.small', True) }}"
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
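
Like the other oo_option-backed settings in this file, the new value can also be overridden directly at run time, since extra vars take precedence over vars files; e.g. `ansible-playbook playbooks/openstack/openshift-cluster/launch.yml -e openstack_node_port_access_from=10.1.0.0/16` (CIDR illustrative).
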
diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml
new file mode 100644
index 000000000..6ad2e324a
--- /dev/null
+++ b/roles/cockpit-ui/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: Samuel Munilla
+ description: Deploy and enable cockpit-ui
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.1
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
new file mode 100644
index 000000000..c752bcff1
--- /dev/null
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+- name: Expose docker-registry
+ command: >
+ {{ openshift.common.client_binary }} expose service docker-registry -n default
+ register: expose_docker_registry
+ changed_when: "'already exists' not in expose_docker_registry.stderr"
+ failed_when: "'already exists' not in expose_docker_registry.stderr and expose_docker_registry.rc != 0"
+
+- name: Create passthrough route for registry-console
+ command: >
+ {{ openshift.common.client_binary }} create route passthrough
+ --service registry-console
+ --port registry-console
+ -n default
+ register: create_registry_console_route
+ changed_when: "'already exists' not in create_registry_console_route.stderr"
+ failed_when: "'already exists' not in create_registry_console_route.stderr and create_registry_console_route.rc != 0"
+
+- name: Retrieve docker-registry route
+ command: "{{ openshift.common.client_binary }} get route docker-registry -n default --template='{{ '{{' }} .spec.host {{ '}}' }}'"
+ register: docker_registry_route
+ failed_when: false
+ changed_when: false
+
+- name: Retrieve cockpit kube url
+ command: "{{ openshift.common.client_binary }} get route registry-console -n default --template='https://{{ '{{' }} .spec.host {{ '}}' }}'"
+ register: registry_console_cockpit_kube_url
+ failed_when: false
+ changed_when: false
+
+- set_fact:
+ cockpit_image_prefix: "{{ ('-p IMAGE_PREFIX=' ~ openshift_cockpit_deployer_prefix) if openshift_cockpit_deployer_prefix is defined else '' }}"
+
+- name: Deploy registry-console
+ command: >
+ {{ openshift.common.client_binary }} new-app --template=registry-console
+ {{ cockpit_image_prefix }}
+ -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
+ -p REGISTRY_HOST="{{ docker_registry_route.stdout }}:80"
+ -p COCKPIT_KUBE_URL="{{ registry_console_cockpit_kube_url.stdout }}"
+ -n default
+ register: deploy_registry_console
+ changed_when: "'already exists' not in deploy_registry_console.stderr"
+ failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
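
The --template arguments above need literal Go-template braces, which Jinja would otherwise try to evaluate; wrapping them as {{ '{{' }} and {{ '}}' }} emits the braces verbatim. A stripped-down sketch with a hypothetical route name:

- command: "oc get route myroute -n default --template='{{ '{{' }} .spec.host {{ '}}' }}'"
  register: myroute_host
  changed_when: false
  failed_when: false
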
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index 8f271aada..84e2c5c49 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -13,15 +13,15 @@ to 0.3.
Role Variables
--------------
-| Name | Default value | Description |
-|---------------------|-----------------------------------------|-----------------------------------------------|
-| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication |
-| flannel_etcd_key | /openshift.com/network | etcd prefix |
-| etcd_hosts | etcd_urls | a list of etcd endpoints |
-| etcd_conf_dir | {{ openshift.common.config_base }}/node | SSL certificates directory |
-| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
-| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd |
-| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd |
+| Name | Default value | Description |
+|----------------------|-----------------------------------------|-----------------------------------------------|
+| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication |
+| flannel_etcd_key | /openshift.com/network | etcd prefix |
+| etcd_hosts | etcd_urls | a list of etcd endpoints |
+| etcd_cert_config_dir | {{ openshift.common.config_base }}/node | SSL certificates directory |
+| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
+| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd |
+| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd |
Dependencies
------------
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 34cebda9c..988731ef2 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -2,7 +2,6 @@
flannel_interface: "{{ ansible_default_ipv4.interface }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
-etcd_conf_dir: "{{ openshift.common.config_base }}/node"
-etcd_peer_ca_file: "{{ etcd_conf_dir }}/{{ 'ca' if (embedded_etcd | bool) else 'node.etcd-ca' }}.crt"
-etcd_peer_cert_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.crt"
-etcd_peer_key_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.key"
+etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt"
+etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt"
+etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key"
diff --git a/roles/flannel_register/README.md b/roles/flannel_register/README.md
index 623c4c7cf..20a07c35e 100644
--- a/roles/flannel_register/README.md
+++ b/roles/flannel_register/README.md
@@ -16,7 +16,7 @@ Role Variables
|---------------------|----------------------------------------------------|-------------------------------------------------|
| flannel_network | {{ openshift.common.portal_net }} or 172.16.1.1/16 | interface to use for inter-host communication |
| flannel_min_network | {{ min_network }} or 172.16.5.0 | beginning of IP range for the subnet allocation |
-| flannel_subnet_len | /openshift.com/network | size of the subnet allocated to each host |
+| flannel_subnet_len | 24 | size of the subnet allocated to each host |
| flannel_etcd_key | /openshift.com/network | etcd prefix |
| etcd_hosts | etcd_urls | a list of etcd endpoints |
| etcd_conf_dir | {{ openshift.common.config_base }}/master | SSL certificates directory |
diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml
index 6111d1207..e217e37ea 100644
--- a/roles/openshift_cloud_provider/tasks/main.yml
+++ b/roles/openshift_cloud_provider/tasks/main.yml
@@ -2,12 +2,8 @@
- name: Set cloud provider facts
openshift_facts:
role: cloudprovider
- openshift_env: "{{ hostvars
- | oo_merge_hostvars(vars, inventory_hostname)
- | oo_openshift_env }}"
- openshift_env_structures:
- - 'openshift.cloudprovider.aws.*'
- - 'openshift.cloudprovider.openstack.*'
+ local_facts:
+ kind: "{{ openshift_cloudprovider_kind | default(None) }}"
- name: Create cloudprovider config dir
file:
diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml
index c501121e5..f22dd4520 100644
--- a/roles/openshift_cloud_provider/tasks/openstack.yml
+++ b/roles/openshift_cloud_provider/tasks/openstack.yml
@@ -7,4 +7,4 @@
template:
dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf"
src: openstack.conf.j2
- when: "'auth_url' in openshift.cloudprovider.openstack and 'username' in openshift.cloudprovider.openstack and 'password' in openshift.cloudprovider.openstack and ('tenant_id' in openshift.cloudprovider.openstack or 'tenant_name' in openshift.cloudprovider.openstack)"
+ when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)"
diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2
index 8a06b3a08..ce452db24 100644
--- a/roles/openshift_cloud_provider/templates/openstack.conf.j2
+++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2
@@ -1,16 +1,16 @@
[Global]
-auth-url = {{ openshift.cloudprovider.openstack.auth_url }}
-username = {{ openshift.cloudprovider.openstack.username }}
-password = {{ openshift.cloudprovider.openstack.password }}
-{% if 'tenant_id' in openshift.cloudprovider.openstack %}
-tenant-id = {{ openshift.cloudprovider.openstack.tenant_id }}
+auth-url = {{ openshift_cloudprovider_openstack_auth_url }}
+username = {{ openshift_cloudprovider_openstack_username }}
+password = {{ openshift_cloudprovider_openstack_password }}
+{% if openshift_cloudprovider_openstack_tenant_id is defined %}
+tenant-id = {{ openshift_cloudprovider_openstack_tenant_id }}
{% else %}
-tenant-name = {{ openshift.cloudprovider.openstack.tenant_name }}
+tenant-name = {{ openshift_cloudprovider_openstack_tenant_name }}
{% endif %}
-{% if 'region' in openshift.cloudprovider.openstack %}
-region = {{ openshift.cloudprovider.openstack.region }}
+{% if openshift_cloudprovider_openstack_region is defined %}
+region = {{ openshift_cloudprovider_openstack_region }}
{% endif %}
-{% if 'lb_subnet_id' in openshift.cloudprovider.openstack %}
+{% if openshift_cloudprovider_openstack_lb_subnet_id is defined %}
[LoadBalancer]
-subnet-id = {{ openshift.cloudprovider.openstack.lb_subnet_id }}
+subnet-id = {{ openshift_cloudprovider_openstack_lb_subnet_id }}
{% endif %}
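
The template now reads flat openshift_cloudprovider_openstack_* inventory variables instead of the nested openshift.cloudprovider facts, so the config can be driven straight from the inventory. A hypothetical group_vars sketch (every value illustrative):

openshift_cloudprovider_kind: openstack
openshift_cloudprovider_openstack_auth_url: https://keystone.example.com:5000/v2.0/
openshift_cloudprovider_openstack_username: demo
openshift_cloudprovider_openstack_password: secret
openshift_cloudprovider_openstack_tenant_name: demo
openshift_cloudprovider_openstack_region: RegionOne
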
diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml
index c608e9b54..83bf6edc8 100644
--- a/roles/openshift_cloud_provider/vars/main.yml
+++ b/roles/openshift_cloud_provider/vars/main.yml
@@ -1,4 +1,4 @@
---
-has_cloudprovider: "{{ 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != None }}"
-cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift.cloudprovider.kind == 'aws' }}"
-cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift.cloudprovider.kind == 'openstack' }}"
+has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}"
+cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}"
+cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}"
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index ece335fbf..6dae98f9f 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,7 +1,7 @@
---
- fail:
- msg: Flannel can not be used with openshift sdn
- when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
+ msg: Flannel cannot be used with openshift sdn; set openshift_use_openshift_sdn=false if you want to use flannel
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
- fail:
msg: Nuage sdn can not be used with openshift sdn
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index a15285417..e843049f9 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -20,6 +20,8 @@ xpaas_templates_base: "{{ examples_base }}/xpaas-templates"
quickstarts_base: "{{ examples_base }}/quickstart-templates"
infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin"
infrastructure_enterprise_base: "{{ examples_base }}/infrastructure-templates/enterprise"
+cockpit_ui_base: "{{ examples_base }}/infrastructure-templates/enterprise"
+
openshift_examples_import_command: "create"
registry_url: ""
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index f598cf8f2..1ad0d93a2 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -9,7 +9,7 @@ XPAAS_VERSION=ose-v1.3.3
ORIGIN_VERSION=${1:-v1.3}
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
-find ${EXAMPLES_BASE} -name '*.yaml' -delete
+find ${EXAMPLES_BASE} -name '*.yaml' ! -name registry-console.yaml -delete
TEMP=`mktemp -d`
pushd $TEMP
@@ -29,7 +29,6 @@ unzip cakephp-ex-master.zip
unzip application-templates-master.zip
cp origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
cp origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
-cp origin-master/examples/jenkins/pipeline/jenkinstemplate.json ${EXAMPLES_BASE}/quickstart-templates/
cp origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/
cp django-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
cp rails-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
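
The exclusion keeps the checked-in registry-console.yaml from being wiped on each sync, since it is maintained in this repository rather than fetched from origin-master, and the jenkinstemplate.json copy is dropped to match the template's removal below.
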
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/README.md b/roles/openshift_examples/files/examples/v1.3/db-templates/README.md
index 609f4dec9..c66bdb8bf 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/README.md
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/README.md
@@ -38,35 +38,11 @@ Replace `/path/to/template.json` with an appropriate path, that can be either a
local path or a URL. Example:
$ oc new-app https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates/mongodb-ephemeral-template.json
- --> Deploying template mongodb-ephemeral for "https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates/mongodb-ephemeral-template.json"
- With parameters:
- DATABASE_SERVICE_NAME=mongodb
- MONGODB_USER=userJNX # generated
- MONGODB_PASSWORD=tnEDilMVrgjp5AI2 # generated
- MONGODB_DATABASE=sampledb
- MONGODB_ADMIN_PASSWORD=8bYEs8OlNYhVyMBs # generated
- --> Creating resources ...
- Service "mongodb" created
- DeploymentConfig "mongodb" created
- --> Success
- Run 'oc status' to view your app.
The parameters listed in the output above can be tweaked by specifying values in
the command line with the `-p` option:
$ oc new-app examples/db-templates/mongodb-ephemeral-template.json -p DATABASE_SERVICE_NAME=mydb -p MONGODB_USER=default
- --> Deploying template mongodb-ephemeral for "examples/db-templates/mongodb-ephemeral-template.json"
- With parameters:
- DATABASE_SERVICE_NAME=mydb
- MONGODB_USER=default
- MONGODB_PASSWORD=RPvMbWlQFOevSowQ # generated
- MONGODB_DATABASE=sampledb
- MONGODB_ADMIN_PASSWORD=K7tIjDxDHHYCvFrJ # generated
- --> Creating resources ...
- Service "mydb" created
- DeploymentConfig "mydb" created
- --> Success
- Run 'oc status' to view your app.
Note that the persistent template requires an existing persistent volume,
otherwise the deployment won't ever succeed.
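
Since the persistent variants block until a volume binds, a test cluster needs at least one pre-created PersistentVolume before instantiating them. A minimal hostPath sketch for experimentation only (name, size, and path illustrative):

apiVersion: v1
kind: PersistentVolume
metadata:
  name: example-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /tmp/example-pv
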
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml
index 032f94a18..afd47ec7c 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -34,9 +34,11 @@ objects:
metadata:
generateName: metrics-deployer-
spec:
+ securityContext: {}
containers:
- image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
name: deployer
+ securityContext: {}
volumeMounts:
- name: secret
mountPath: /secret
@@ -48,6 +50,10 @@ objects:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
- name: IMAGE_PREFIX
value: ${IMAGE_PREFIX}
- name: IMAGE_VERSION
@@ -58,8 +64,12 @@ objects:
value: ${MODE}
- name: REDEPLOY
value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
- name: USE_PERSISTENT_STORAGE
value: ${USE_PERSISTENT_STORAGE}
+ - name: DYNAMICALLY_PROVISION_STORAGE
+ value: ${DYNAMICALLY_PROVISION_STORAGE}
- name: HAWKULAR_METRICS_HOSTNAME
value: ${HAWKULAR_METRICS_HOSTNAME}
- name: CASSANDRA_NODES
@@ -68,6 +78,10 @@ objects:
value: ${CASSANDRA_PV_SIZE}
- name: METRIC_DURATION
value: ${METRIC_DURATION}
+ - name: USER_WRITE_ACCESS
+ value: ${USER_WRITE_ACCESS}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
- name: METRIC_RESOLUTION
value: ${METRIC_RESOLUTION}
dnsPolicy: ClusterFirst
@@ -87,7 +101,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "3.2.1"
+ value: "3.3.0"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -97,7 +111,7 @@ parameters:
name: HAWKULAR_METRICS_HOSTNAME
required: true
-
- description: "Can be set to: 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process)"
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
name: MODE
value: "deploy"
-
@@ -105,10 +119,18 @@ parameters:
name: REDEPLOY
value: "false"
-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
description: "Set to true for persistent storage, set to false to use non persistent storage"
name: USE_PERSISTENT_STORAGE
value: "true"
-
+ description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
+ name: DYNAMICALLY_PROVISION_STORAGE
+ value: "false"
+-
description: "The number of Cassandra Nodes to deploy for the initial cluster"
name: CASSANDRA_NODES
value: "1"
@@ -121,6 +143,14 @@ parameters:
name: METRIC_DURATION
value: "7"
-
- description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds"
+ description: "If a user accounts should be allowed to write metrics."
+ name: USER_WRITE_ACCESS
+ value: "false"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+ description: "How often metrics should be gathered. Defaults value of '15s' for 15 seconds"
name: METRIC_RESOLUTION
- value: "10s"
+ value: "15s"
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml
new file mode 100644
index 000000000..11478263c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml
@@ -0,0 +1,124 @@
+kind: Template
+apiVersion: v1
+metadata:
+ name: "registry-console"
+ annotations:
+ description: "Template for deploying registry web console. Requires cluster-admin."
+ tags: infrastructure
+labels:
+ createdBy: "registry-console-template"
+objects:
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "registry-console"
+ template:
+ metadata:
+ labels:
+ name: "registry-console"
+ spec:
+ containers:
+ - name: registry-console
+ image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ ports:
+ - containerPort: 9090
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ env:
+ - name: OPENSHIFT_OAUTH_PROVIDER_URL
+ value: "${OPENSHIFT_OAUTH_PROVIDER_URL}"
+ - name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ - name: KUBERNETES_INSECURE
+ value: "false"
+ - name: COCKPIT_KUBE_INSECURE
+ value: "false"
+ - name: REGISTRY_ONLY
+ value: "true"
+ - name: REGISTRY_HOST
+ value: "${REGISTRY_HOST}"
+ - kind: Service
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ type: ClusterIP
+ ports:
+ - name: registry-console
+ protocol: TCP
+ port: 9000
+ targetPort: 9090
+ selector:
+ name: "registry-console"
+ - kind: ImageStream
+ apiVersion: v1
+ metadata:
+ name: registry-console
+ annotations:
+ description: Atomic Registry console
+ spec:
+ tags:
+ - annotations: null
+ from:
+ kind: DockerImage
+ name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_VERSION}
+ - kind: OAuthClient
+ apiVersion: v1
+ metadata:
+ name: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ respondWithChallenges: false
+ secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}"
+ redirectURIs:
+ - "${COCKPIT_KUBE_URL}"
+parameters:
+ - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+ - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.3", set version "3.3"'
+ name: IMAGE_VERSION
+ value: "3.3"
+ - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
+ name: OPENSHIFT_OAUTH_PROVIDER_URL
+ required: true
+ - description: "The registry console URL. This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com"
+ name: COCKPIT_KUBE_URL
+ required: true
+ - description: "Oauth client secret"
+ name: OPENSHIFT_OAUTH_CLIENT_SECRET
+ from: "user[a-zA-Z0-9]{64}"
+ generate: expression
+ - description: "Oauth client id"
+ name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "cockpit-oauth-client"
+ - description: "The integrated registry hostname exposed via route, e.g. registry.example.com"
+ name: REGISTRY_HOST
+ required: true
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml
index ab62ae76f..ac5098c8a 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml
@@ -151,6 +151,6 @@ parameters:
name: HEAPSTER_NODE_ID
value: "nodename"
-
- description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds"
+ description: "How often metrics should be gathered. Defaults value of '15s' for 15 seconds"
name: METRIC_RESOLUTION
- value: "10s"
+ value: "15s"
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml
new file mode 100644
index 000000000..80cc4233b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml
@@ -0,0 +1,124 @@
+kind: Template
+apiVersion: v1
+metadata:
+ name: "registry-console"
+ annotations:
+ description: "Template for deploying registry web console. Requires cluster-admin."
+ tags: infrastructure
+labels:
+ createdBy: "registry-console-template"
+objects:
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "registry-console"
+ template:
+ metadata:
+ labels:
+ name: "registry-console"
+ spec:
+ containers:
+ - name: registry-console
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ ports:
+ - containerPort: 9090
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ env:
+ - name: OPENSHIFT_OAUTH_PROVIDER_URL
+ value: "${OPENSHIFT_OAUTH_PROVIDER_URL}"
+ - name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ - name: KUBERNETES_INSECURE
+ value: "false"
+ - name: COCKPIT_KUBE_INSECURE
+ value: "false"
+ - name: REGISTRY_ONLY
+ value: "true"
+ - name: REGISTRY_HOST
+ value: "${REGISTRY_HOST}"
+ - kind: Service
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ type: ClusterIP
+ ports:
+ - name: registry-console
+ protocol: TCP
+ port: 9000
+ targetPort: 9090
+ selector:
+ name: "registry-console"
+ - kind: ImageStream
+ apiVersion: v1
+ metadata:
+ name: registry-console
+ annotations:
+ description: Atomic Registry console
+ spec:
+ tags:
+ - annotations: null
+ from:
+ kind: DockerImage
+ name: ${IMAGE_NAME}
+ name: ${IMAGE_VERSION}
+ - kind: OAuthClient
+ apiVersion: v1
+ metadata:
+ name: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ respondWithChallenges: false
+ secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}"
+ redirectURIs:
+ - "${COCKPIT_KUBE_URL}"
+parameters:
+ - description: "Container image name"
+ name: IMAGE_NAME
+ value: "cockpit/kubernetes"
+ - description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
+ name: IMAGE_VERSION
+ value: latest
+ - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
+ name: OPENSHIFT_OAUTH_PROVIDER_URL
+ required: true
+ - description: "The registry console URL. This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com"
+ name: COCKPIT_KUBE_URL
+ required: true
+ - description: "Oauth client secret"
+ name: OPENSHIFT_OAUTH_CLIENT_SECRET
+ from: "user[a-zA-Z0-9]{64}"
+ generate: expression
+ - description: "Oauth client id"
+ name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "cockpit-oauth-client"
+ - description: "The integrated registry hostname exposed via route, e.g. registry.example.com"
+ name: REGISTRY_HOST
+ required: true
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
index f85e7e537..ab4982690 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
@@ -126,9 +126,9 @@
},
"spec": {
"strategy": {
- "type": "Rolling",
+ "type": "Recreate",
"recreateParams": {
- "pre": {
+ "pre": {
"failurePolicy": "Retry",
"execNewPod": {
"command": [
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
index 4f565206f..e8e361415 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
@@ -5,12 +5,12 @@
"name": "jenkins-ephemeral",
"creationTimestamp": null,
"annotations": {
- "description": "Jenkins service, without persistent storage.\nThe username is 'admin' and the tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "description": "Jenkins service, without persistent storage.\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins"
}
},
- "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}.",
+ "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
"objects": [
{
"kind": "Route",
@@ -26,6 +26,7 @@
},
"tls": {
"termination": "edge",
+ "insecureEdgeTerminationPolicy": "Redirect",
"certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----",
"key": "-----BEGIN PRIVATE KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----",
"caCertificate": "-----BEGIN CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----"
@@ -75,6 +76,7 @@
}
},
"spec": {
+ "serviceAccountName": "${JENKINS_SERVICE_NAME}",
"containers": [
{
"name": "jenkins",
@@ -89,7 +91,7 @@
},
"livenessProbe": {
"timeoutSeconds": 3,
- "initialDelaySeconds": 60,
+ "initialDelaySeconds": 120,
"httpGet": {
"path": "/login",
"port": 8080
@@ -99,6 +101,18 @@
{
"name": "JENKINS_PASSWORD",
"value": "${JENKINS_PASSWORD}"
+ },
+ {
+ "name": "KUBERNETES_MASTER",
+ "value": "https://kubernetes.default:443"
+ },
+ {
+ "name": "KUBERNETES_TRUST_CERTIFICATES",
+ "value": "true"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "value": "${JNLP_SERVICE_NAME}"
}
],
"resources": {
@@ -136,19 +150,42 @@
}
},
{
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ {
+ "kind": "RoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}_edit"
+ },
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ ],
+ "roleRef": {
+ "name": "edit"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "name": "${JNLP_SERVICE_NAME}"
},
"spec": {
"ports": [
{
- "name": "web",
+ "name": "agent",
"protocol": "TCP",
- "port": 8080,
- "targetPort": 8080,
+ "port": 50000,
+ "targetPort": 50000,
"nodePort": 0
}
],
@@ -159,6 +196,35 @@
"type": "ClusterIP",
"sessionAffinity": "None"
}
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
+ "service.openshift.io/infrastructure": "true"
+ },
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
}
],
"parameters": [
@@ -169,6 +235,12 @@
"value": "jenkins"
},
{
+ "name": "JNLP_SERVICE_NAME",
+ "displayName": "Jenkins JNLP Service Name",
+ "description": "The name of the service used for master/slave communication.",
+ "value": "jenkins-jnlp"
+ },
+ {
"name": "JENKINS_PASSWORD",
"displayName": "Jenkins Password",
"description": "Password for the Jenkins 'admin' user.",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
index eda826a5b..e4a18961e 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
@@ -5,12 +5,12 @@
"name": "jenkins-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "Jenkins service, with persistent storage.\nThe username is 'admin' and the tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.\nYou must have persistent volumes available in your cluster to use this template.",
+ "description": "Jenkins service, with persistent storage.\nYou must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins"
}
},
- "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}.",
+ "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
"objects": [
{
"kind": "Route",
@@ -26,6 +26,7 @@
},
"tls": {
"termination": "edge",
+ "insecureEdgeTerminationPolicy": "Redirect",
"certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----",
"key": "-----BEGIN PRIVATE KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----",
"caCertificate": "-----BEGIN CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----"
@@ -92,6 +93,7 @@
}
},
"spec": {
+ "serviceAccountName": "${JENKINS_SERVICE_NAME}",
"containers": [
{
"name": "jenkins",
@@ -106,7 +108,7 @@
},
"livenessProbe": {
"timeoutSeconds": 3,
- "initialDelaySeconds": 60,
+ "initialDelaySeconds": 120,
"httpGet": {
"path": "/login",
"port": 8080
@@ -116,6 +118,18 @@
{
"name": "JENKINS_PASSWORD",
"value": "${JENKINS_PASSWORD}"
+ },
+ {
+ "name": "KUBERNETES_MASTER",
+ "value": "https://kubernetes.default:443"
+ },
+ {
+ "name": "KUBERNETES_TRUST_CERTIFICATES",
+ "value": "true"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "value": "${JNLP_SERVICE_NAME}"
}
],
"resources": {
@@ -153,19 +167,42 @@
}
},
{
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ {
+ "kind": "RoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}_edit"
+ },
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ ],
+ "roleRef": {
+ "name": "edit"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "name": "${JNLP_SERVICE_NAME}"
},
"spec": {
"ports": [
{
- "name": "web",
+ "name": "agent",
"protocol": "TCP",
- "port": 8080,
- "targetPort": 8080,
+ "port": 50000,
+ "targetPort": 50000,
"nodePort": 0
}
],
@@ -176,6 +213,35 @@
"type": "ClusterIP",
"sessionAffinity": "None"
}
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
+ "service.openshift.io/infrastructure": "true"
+ },
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
}
],
"parameters": [
@@ -186,6 +252,12 @@
"value": "jenkins"
},
{
+ "name": "JNLP_SERVICE_NAME",
+ "displayName": "Jenkins JNLP Service Name",
+ "description": "The name of the service used for master/slave communication.",
+ "value": "jenkins-jnlp"
+ },
+ {
"name": "JENKINS_PASSWORD",
"displayName": "Jenkins Password",
"description": "Password for the Jenkins 'admin' user.",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json
deleted file mode 100644
index fc409f709..000000000
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json
+++ /dev/null
@@ -1,256 +0,0 @@
-{
- "kind": "Template",
- "apiVersion": "v1",
- "metadata": {
- "name": "jenkins",
- "creationTimestamp": null,
- "annotations": {
- "description": "Jenkins service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
- "iconClass": "icon-jenkins",
- "tags": "instant-app,jenkins"
- }
- },
- "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}.",
- "objects": [
- {
- "kind": "Route",
- "apiVersion": "v1",
- "metadata": {
- "name": "jenkins",
- "creationTimestamp": null
- },
- "spec": {
- "to": {
- "kind": "Service",
- "name": "${JENKINS_SERVICE_NAME}"
- },
- "tls": {
- "termination": "edge",
- "insecureEdgeTerminationPolicy": "Redirect",
- "certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----",
- "key": "-----BEGIN PRIVATE KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----",
- "caCertificate": "-----BEGIN CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----"
- }
- }
- },
- {
- "kind": "DeploymentConfig",
- "apiVersion": "v1",
- "metadata": {
- "name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
- },
- "spec": {
- "strategy": {
- "type": "Recreate"
- },
- "triggers": [
- {
- "type": "ImageChange",
- "imageChangeParams": {
- "automatic": true,
- "containerNames": [
- "jenkins"
- ],
- "from": {
- "kind": "ImageStreamTag",
- "name": "jenkins:1",
- "namespace": "openshift"
- }
- }
- },
- {
- "type": "ConfigChange"
- }
- ],
- "replicas": 1,
- "selector": {
- "name": "${JENKINS_SERVICE_NAME}"
- },
- "template": {
- "metadata": {
- "creationTimestamp": null,
- "labels": {
- "name": "${JENKINS_SERVICE_NAME}"
- }
- },
- "spec": {
- "serviceAccountName": "${JENKINS_SERVICE_NAME}",
- "containers": [
- {
- "name": "jenkins",
- "image": " ",
- "readinessProbe": {
- "timeoutSeconds": 3,
- "initialDelaySeconds": 3,
- "httpGet": {
- "path": "/login",
- "port": 8080
- }
- },
- "livenessProbe": {
- "timeoutSeconds": 3,
- "initialDelaySeconds": 120,
- "httpGet": {
- "path": "/login",
- "port": 8080
- }
- },
- "env": [
- {
- "name": "JENKINS_PASSWORD",
- "value": "${JENKINS_PASSWORD}"
- },
- {
- "name": "KUBERNETES_MASTER",
- "value": "https://kubernetes.default:443"
- },
- {
- "name": "KUBERNETES_TRUST_CERTIFICATES",
- "value": "true"
- }
- ],
- "resources": {
- "limits": {
- "memory": "${MEMORY_LIMIT}"
- }
- },
- "volumeMounts": [
- {
- "name": "${JENKINS_SERVICE_NAME}-data",
- "mountPath": "/var/lib/jenkins"
- }
- ],
- "terminationMessagePath": "/dev/termination-log",
- "imagePullPolicy": "IfNotPresent",
- "capabilities": {},
- "securityContext": {
- "capabilities": {},
- "privileged": false
- }
- }
- ],
- "volumes": [
- {
- "name": "${JENKINS_SERVICE_NAME}-data",
- "emptyDir": {
- "medium": ""
- }
- }
- ],
- "restartPolicy": "Always",
- "dnsPolicy": "ClusterFirst"
- }
- }
- }
- },
- {
- "kind": "ServiceAccount",
- "apiVersion": "v1",
- "metadata": {
- "name": "${JENKINS_SERVICE_NAME}"
- }
- },
- {
- "kind": "RoleBinding",
- "apiVersion": "v1",
- "metadata": {
- "name": "${JENKINS_SERVICE_NAME}_edit"
- },
- "groupNames": null,
- "subjects": [
- {
- "kind": "ServiceAccount",
- "name": "${JENKINS_SERVICE_NAME}"
- }
- ],
- "roleRef": {
- "name": "edit"
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "metadata": {
- "name": "jenkins-jnlp"
- },
- "spec": {
- "ports": [
- {
- "name": "agent",
- "protocol": "TCP",
- "port": 50000,
- "targetPort": 50000,
- "nodePort": 0
- }
- ],
- "selector": {
- "name": "${JENKINS_SERVICE_NAME}"
- },
- "portalIP": "",
- "type": "ClusterIP",
- "sessionAffinity": "None"
- }
- },
- {
- "kind": "Service",
- "apiVersion": "v1",
- "metadata": {
- "name": "${JENKINS_SERVICE_NAME}",
- "annotations": {
- "service.alpha.openshift.io/dependencies": "[{\"name\": \"jenkins-jnlp\", \"namespace\": \"\", \"kind\": \"Service\"}]",
- "service.openshift.io/infrastructure": "true"
- },
- "creationTimestamp": null
- },
- "spec": {
- "ports": [
- {
- "name": "web",
- "protocol": "TCP",
- "port": 80,
- "targetPort": 8080,
- "nodePort": 0
- }
- ],
- "selector": {
- "name": "${JENKINS_SERVICE_NAME}"
- },
- "portalIP": "",
- "type": "ClusterIP",
- "sessionAffinity": "None"
- }
- }
- ],
- "parameters": [
- {
- "name": "MEMORY_LIMIT",
- "displayName": "Memory Limit",
- "description": "Maximum amount of memory the container can use.",
- "value": "512Mi"
- },
- {
- "name": "NAMESPACE",
- "displayName": "Namespace",
- "description": "The OpenShift Namespace where the ImageStream resides.",
- "value": "openshift"
- },
- {
- "name": "JENKINS_SERVICE_NAME",
- "displayName": "Jenkins Service Name",
- "description": "The name of the OpenShift Service exposed for the Jenkins container.",
- "value": "jenkins"
- },
- {
- "name": "JENKINS_PASSWORD",
- "displayName": "Jenkins Password",
- "description": "Password for the Jenkins 'admin' user.",
- "generate": "expression",
- "from": "[a-zA-Z0-9]{16}",
- "required": true
- }
- ],
- "labels": {
- "template": "jenkins-pipeline-template"
- }
-}
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 6fab996b2..ebd799466 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -477,6 +477,14 @@ def set_selectors(facts):
facts['hosted']['registry'] = {}
if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
facts['hosted']['registry']['selector'] = selector
+ if 'metrics' not in facts['hosted']:
+ facts['hosted']['metrics'] = {}
+ if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
+ facts['hosted']['metrics']['selector'] = None
+ if 'logging' not in facts['hosted']:
+ facts['hosted']['logging'] = {}
+ if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
+ facts['hosted']['logging']['selector'] = None
return facts
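
The new selector handling repeats one defaulting pattern per hosted component. A rough Python sketch of that pattern (default_selector is a hypothetical helper, and 'region=infra' is only an example value; the registry's real default is the selector computed earlier in set_selectors):

    def default_selector(facts, component, selector=None):
        """Ensure facts['hosted'][component] exists and default its selector."""
        section = facts.setdefault('hosted', {}).setdefault(component, {})
        if section.get('selector') in (None, 'None'):
            section['selector'] = selector
        return facts

    facts = {'hosted': {'registry': {'selector': 'None'}}}
    default_selector(facts, 'registry', 'region=infra')  # example value
    default_selector(facts, 'metrics')   # new: defaults to None
    default_selector(facts, 'logging')   # new: defaults to None
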
@@ -789,7 +797,7 @@ def set_deployment_facts_if_unset(facts):
curr_disabled_features = set(facts['master']['disabled_features'])
facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
else:
- if deployment_type == 'atomic-enterprise':
+ if facts['common']['deployment_subtype'] == 'registry':
facts['master']['disabled_features'] = openshift_features
if 'node' in facts:
@@ -1649,7 +1657,12 @@ class OpenShiftFacts(object):
else:
deployment_type = 'origin'
- defaults = self.get_defaults(roles, deployment_type)
+ if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
+ deployment_subtype = local_facts['common']['deployment_subtype']
+ else:
+ deployment_subtype = 'basic'
+
+ defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
provider_facts = self.init_provider_facts()
facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts,
@@ -1681,7 +1694,7 @@ class OpenShiftFacts(object):
facts = set_installed_variant_rpm_facts(facts)
return dict(openshift=facts)
- def get_defaults(self, roles, deployment_type):
+ def get_defaults(self, roles, deployment_type, deployment_subtype):
""" Get default fact values
Args:
@@ -1701,6 +1714,7 @@ class OpenShiftFacts(object):
defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
public_ip=ip_addr,
deployment_type=deployment_type,
+ deployment_subtype=deployment_subtype,
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
@@ -1791,8 +1805,9 @@ class OpenShiftFacts(object):
filesystem='ext4',
volumeID='123'),
host=None,
- access_modes=['ReadWriteMany'],
- create_pv=True
+ access_modes=['ReadWriteOnce'],
+ create_pv=True,
+ create_pvc=False
)
),
registry=dict(
@@ -1807,7 +1822,8 @@ class OpenShiftFacts(object):
options='*(rw,root_squash)'),
host=None,
access_modes=['ReadWriteMany'],
- create_pv=True
+ create_pv=True,
+ create_pvc=True
)
),
router=dict()
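
The hosted storage defaults now distinguish creating the PV from creating the PVC, and metrics volumes drop to single-writer access. In sketch form, with the values taken from the hunk above:

    # Metrics: provision the PV but leave claim creation to the deployer.
    metrics_storage = dict(access_modes=['ReadWriteOnce'],
                           create_pv=True, create_pvc=False)
    # Registry: provision both PV and PVC for shared access.
    registry_storage = dict(access_modes=['ReadWriteMany'],
                            create_pv=True, create_pvc=True)
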
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 4dbbd7f45..afeb78f95 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -24,6 +24,7 @@
local_facts:
# TODO: Deprecate deployment_type in favor of openshift_deployment_type
deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
cluster_id: "{{ openshift_cluster_id | default('default') }}"
hostname: "{{ openshift_hostname | default(None) }}"
ip: "{{ openshift_ip | default(None) }}"
@@ -40,4 +41,3 @@
- name: Set repoquery command
set_fact:
repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
diff --git a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml b/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
index 6bf859e82..60eefd71a 100644
--- a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
@@ -5,10 +5,11 @@
- name: Determine if volume is already attached to dc/docker-registry
command: "{{ openshift.common.client_binary }} get -o template dc/docker-registry --template=\\{\\{.spec.template.spec.volumes\\}\\} --output-version=v1"
changed_when: false
+ failed_when: false
register: registry_volumes_output
- set_fact:
- volume_attached: "{{ registry_volume_claim in registry_volumes_output.stdout }}"
+ volume_attached: "{{ registry_volume_claim in (registry_volumes_output).stdout | default(['']) }}"
- name: Add volume to dc/docker-registry
command: >
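
With failed_when: false added, a missing dc/docker-registry no longer aborts the play; the attachment check just sees empty output. The guarded test amounts to this small Python sketch:

    def volume_attached(claim, oc_stdout=None):
        # An absent deployment config yields no output, hence False.
        return claim in (oc_stdout or '')
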
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 0c0c7e61e..56110c28f 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -45,6 +45,13 @@
failed_when: false
changed_when: false
+- name: Preserve Master API AWS options
+ command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ register: master_api_aws
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+ failed_when: false
+ changed_when: false
+
- name: Create the master api service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
@@ -62,13 +69,29 @@
line: "{{ item }}"
with_items: "{{ master_api_proxy.stdout_lines | default([]) }}"
+- name: Restore Master API AWS Options
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+ and master_api_aws.rc == 0 and
+ not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ line: "{{ item }}"
+ with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
+
- name: Preserve Master Controllers Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
register: master_controllers_proxy
when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
failed_when: false
changed_when: false
+- name: Preserve Master Controllers AWS options
+ command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ register: master_controllers_aws
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+ failed_when: false
+ changed_when: false
+
- name: Create the master controllers service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
@@ -86,6 +109,15 @@
when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
and master_controllers_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
+- name: Restore Master Controllers AWS Options
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ line: "{{ item }}"
+ with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+ and master_controllers_aws.rc == 0 and
+ not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
+
- name: Install Master docker service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
@@ -99,6 +131,12 @@
failed_when: false
changed_when: false
+- name: Preserve Master AWS options
+ command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master
+ register: master_aws
+ failed_when: false
+ changed_when: false
+
- name: Create the master service env file
template:
src: "atomic-openshift-master.j2"
@@ -113,3 +151,10 @@
line: "{{ item }}"
with_items: "{{ master_proxy.stdout_lines | default([]) }}"
when: master_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
+
+- name: Restore Master AWS Options
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
+ line: "{{ item }}"
+ with_items: "{{ master_aws.stdout_lines | default([]) }}"
+ when: master_aws.rc == 0 and not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
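
Each Preserve/Restore pair above captures AWS_* lines from a sysconfig file before it is re-templated and appends them again afterwards, unless the installer itself manages the credentials. Roughly, in Python (function names are illustrative only):

    def preserve_env_lines(text, prefix='AWS_'):
        """Grep-equivalent: keep lines starting with the given prefix."""
        return [l for l in text.splitlines() if l.startswith(prefix)]

    def restore_env_lines(env_text, saved, installer_manages_keys):
        """Re-append saved lines unless Ansible supplies the keys itself."""
        lines = env_text.splitlines()
        if not installer_manages_keys:
            lines += [l for l in saved if l not in lines]
        return '\n'.join(lines)
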
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 75d44d308..10eaeb401 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -4,9 +4,9 @@ CONFIG_FILE={{ openshift_master_config_file }}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
-{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
-AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}
-AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
+{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined %}
+AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}
+AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
{% if 'api_env_vars' in openshift.master or 'controllers_env_vars' in openshift.master -%}
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index df1dbb85e..eef0f414e 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,7 @@ Requires=docker.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 5ff2edae4..a8f5d7351 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,7 @@ PartOf=docker.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 31e86f5bd..ced3eb76f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -44,6 +44,13 @@ auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}
{% endif %}
controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }}
{% endif %}
+{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
+controllerConfig:
+ serviceServingCert:
+ signer:
+ certFile: service-signer.crt
+ keyFile: service-signer.key
+{% endif %}
controllers: '*'
corsAllowedOrigins:
{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
@@ -156,6 +163,7 @@ networkConfig:
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
+ externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }}
oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
@@ -173,7 +181,7 @@ oauthConfig:
{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
masterCA: ca-bundle.crt
{% else %}
- masterCA: ca.rt
+ masterCA: ca.crt
{% endif %}
masterPublicURL: {{ openshift.master.public_api_url }}
masterURL: {{ openshift.master.api_url }}
@@ -210,7 +218,7 @@ serviceAccountConfig:
{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
masterCA: ca-bundle.crt
{% else %}
- masterCA: ca.rt
+ masterCA: ca.crt
{% endif %}
privateKeyFile: serviceaccounts.private.key
publicKeyFiles:
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index 97f698b68..be7644710 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -8,7 +8,7 @@ Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index 41308bd81..43fb3cafa 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -4,9 +4,9 @@ CONFIG_FILE={{ openshift_master_config_file }}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
-{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
-AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}
-AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
+{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined %}
+AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}
+AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
{% if 'api_env_vars' in openshift.master -%}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 37a5d75f2..6d26a69eb 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -4,9 +4,9 @@ CONFIG_FILE={{ openshift_master_config_file }}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
-{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
-AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}
-AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
+{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined %}
+AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}
+AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
{% if 'controllers_env_vars' in openshift.master -%}
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index aafb06f93..ffde59358 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -164,3 +164,29 @@
owner: "{{ item }}"
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
+
+# Ensure ca-bundle exists for 3.2+ configuration
+- name: Check for ca-bundle.crt
+ stat:
+ path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+ register: ca_bundle_stat
+ failed_when: false
+
+- name: Check for ca.crt
+ stat:
+ path: "{{ openshift.common.config_base }}/master/ca.crt"
+ register: ca_crt_stat
+ failed_when: false
+
+- name: Migrate ca.crt to ca-bundle.crt
+ command: mv ca.crt ca-bundle.crt
+ args:
+ chdir: "{{ openshift.common.config_base }}/master"
+ when: ca_crt_stat.stat.isreg | default(false) and not ca_bundle_stat.stat.exists
+
+- name: Link ca.crt to ca-bundle.crt
+ file:
+ src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+ path: "{{ openshift.common.config_base }}/master/ca.crt"
+ state: link
+ when: ca_crt_stat.stat.isreg | default(false) and not ca_bundle_stat.stat.exists
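
The migration above renames ca.crt to ca-bundle.crt once and leaves ca.crt behind as a symlink, so pre-3.2 references keep resolving. A condensed Python equivalent of the path handling (the real work happens in the separate tasks above):

    import os

    def migrate_ca(config_dir):
        ca = os.path.join(config_dir, 'ca.crt')
        bundle = os.path.join(config_dir, 'ca-bundle.crt')
        # Only migrate a regular ca.crt when no bundle exists yet.
        if os.path.isfile(ca) and not os.path.islink(ca) \
                and not os.path.exists(bundle):
            os.rename(ca, bundle)
            os.symlink(bundle, ca)
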
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 17c31ec05..e0c0fc644 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -17,7 +17,6 @@
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
logging_public_url: "{{ openshift_master_logging_public_url | default(None) }}"
- metrics_public_url: "{{ openshift_master_metrics_public_url | default(None) }}"
logout_url: "{{ openshift_master_logout_url | default(None) }}"
extension_scripts: "{{ openshift_master_extension_scripts | default(None) }}"
extension_stylesheets: "{{ openshift_master_extension_stylesheets | default(None) }}"
@@ -80,3 +79,4 @@
api_env_vars: "{{ openshift_master_api_env_vars | default(None) }}"
controllers_env_vars: "{{ openshift_master_controllers_env_vars | default(None) }}"
audit_config: "{{ openshift_master_audit_config | default(None) }}"
+ metrics_public_url: "{% if openshift_hosted_metrics_deploy | default(false) %}https://{{ metrics_hostname }}/hawkular/metrics{% endif %}"
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
index 086d8340c..406d50c24 100644
--- a/roles/openshift_master_facts/vars/main.yml
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -17,4 +17,9 @@ builddefaults_yaml:
- name: https_proxy
value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
- name: no_proxy
- value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}" \ No newline at end of file
+ value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
+
+metrics_hostname: "{{ openshift_hosted_metrics_public_url
+ | default('hawkular-metrics.' ~ (openshift.master.default_subdomain
+ | default(openshift_master_default_subdomain)))
+ | oo_hostname_from_url }}"
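
metrics_hostname feeds either a full URL or a bare hostname through the oo_hostname_from_url filter. Assuming that filter simply extracts the host portion, a Python approximation:

    from urllib.parse import urlparse

    def hostname_from_url(value):
        """Return the host part of a URL, or the value if already a host."""
        return urlparse(value).netloc or value

    hostname_from_url('https://hawkular-metrics.example.com/hawkular/metrics')
    # -> 'hawkular-metrics.example.com'
    hostname_from_url('hawkular-metrics.example.com')
    # -> 'hawkular-metrics.example.com'
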
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
new file mode 100644
index 000000000..edb7369de
--- /dev/null
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -0,0 +1,31 @@
+---
+- name: restart master
+ service: name={{ openshift.common.service_type }}-master state=restarted
+ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
+
+- name: restart master api
+ service: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
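
The Verify API Server handler polls /healthz/ready with curl until it returns ok, retrying 120 times at one-second intervals. A standalone Python sketch of the same loop (url and cacert are placeholders):

    import subprocess
    import time

    def wait_for_api(url, cacert, retries=120, delay=1):
        """Poll the readiness endpoint until it reports 'ok'."""
        for _ in range(retries):
            result = subprocess.run(
                ['curl', '--silent', '--cacert', cacert, url],
                capture_output=True, text=True)
            if result.stdout.strip() == 'ok':
                return True
            time.sleep(delay)
        return False
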
diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_metrics/tasks/install.yml
new file mode 100644
index 000000000..9c4eb22d7
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install.yml
@@ -0,0 +1,114 @@
+---
+
+- name: Test if metrics-deployer service account exists
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace=openshift-infra
+ get serviceaccount metrics-deployer -o json
+ register: serviceaccount
+ changed_when: false
+ failed_when: false
+
+- name: Create metrics-deployer Service Account
+ shell: >
+ echo {{ metrics_deployer_sa | to_json | quote }} |
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ create -f -
+ when: serviceaccount.rc == 1
+
+- name: Test edit permissions
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
+ register: edit_rolebindings
+ changed_when: false
+
+- name: Add edit permission on the openshift-infra project to the metrics-deployer SA
+ command: >
+ {{ openshift.common.admin_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ policy add-role-to-user edit
+ system:serviceaccount:openshift-infra:metrics-deployer
+ when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
+
+- name: Test cluster-reader permissions
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
+ register: cluster_reader_clusterrolebindings
+ changed_when: false
+
+- name: Add cluster-reader permission to the heapster SA in the openshift-infra project
+ command: >
+ {{ openshift.common.admin_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ policy add-cluster-role-to-user cluster-reader
+ system:serviceaccount:openshift-infra:heapster
+ when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout"
+
+- name: Create metrics-deployer secret
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ secrets new metrics-deployer nothing=/dev/null
+ register: metrics_deployer_secret
+ changed_when: metrics_deployer_secret.rc == 0
+ failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr"
+
+# TODO: extend this to allow user passed in certs or generating cert with
+# OpenShift CA
+- name: Build metrics deployer command
+ set_fact:
+ deployer_cmd: "{{ openshift.common.client_binary }} process -f \
+ {{ metrics_template_dir }}/metrics-deployer.yaml -v \
+ HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }},USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }},DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}{{ image_prefix }}{{ image_version }},MODE={{ deployment_mode }} \
+ | {{ openshift.common.client_binary }} --namespace openshift-infra \
+ --config={{ openshift_metrics_kubeconfig }} \
+ create -o name -f -"
+
+- name: Deploy Metrics
+ shell: "{{ deployer_cmd }}"
+ register: deploy_metrics
+ failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0"
+ changed_when: deploy_metrics.rc == 0
+
+- set_fact:
+ deployer_pod: "{{ deploy_metrics.stdout[1:2] }}"
+
+# TODO: re-enable this once the metrics deployer validation issue is fixed
+# when using dynamically provisioned volumes
+- name: "Wait for image pull and deployer pod"
+ shell: >
+ {{ openshift.common.client_binary }}
+ --namespace openshift-infra
+ --config={{ openshift_metrics_kubeconfig }}
+ get {{ deploy_metrics.stdout }}
+ register: deploy_result
+ until: "{{ 'Completed' in deploy_result.stdout }}"
+ failed_when: False
+ retries: 60
+ delay: 10
+
+- name: Configure master for metrics
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: assetConfig.metricsPublicURL
+ yaml_value: "https://{{ metrics_hostname }}/hawkular/metrics"
+ notify: restart master
+
+- name: Store metrics public_url
+ openshift_facts:
+ role: master
+ local_facts:
+ metrics_public_url: "https://{{ metrics_hostname }}/hawkular/metrics"
+ when: deploy_result | changed
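
deployer_cmd concatenates optional template parameters into the single -v argument of oc process; facts such as image_prefix already carry their leading comma, so unset options contribute nothing. A sketch of that assembly (all values below are placeholders):

    base = ('HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.apps.example.com,'
            'USE_PERSISTENT_STORAGE=true,'
            'METRIC_DURATION=7,METRIC_RESOLUTION=10s')
    image_prefix = ',IMAGE_PREFIX=registry.example.com/'  # '' when unset
    image_version = ''                                    # unset here
    v_arg = base + image_prefix + image_version + ',MODE=deploy'
    print(v_arg)
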
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 43b85204a..88432a9f8 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,64 +1,87 @@
---
-- name: Copy Configuration to temporary conf
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{hawkular_tmp_conf}}
- changed_when: false
+- fail:
+ msg: This role requires openshift_master_default_subdomain or openshift_hosted_metrics_public_url to be set
+ when: openshift.master.metrics_public_url | default(openshift_hosted_metrics_public_url | default(openshift.master.default_subdomain | default(openshift_master_default_subdomain | default(none)))) is none
-- name: Create metrics-deployer Service Account
- shell: >
- echo {{ deployer_service_account | to_json | quote }} |
- {{ openshift.common.client_binary }} create
- -n openshift-infra
- --config={{hawkular_tmp_conf}}
- -f -
- register: deployer_create_service_account
- failed_when: "'already exists' not in deployer_create_service_account.stderr and deployer_create_service_account.rc != 0"
- changed_when: deployer_create_service_account.rc == 0
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
-- name: Create metrics-deployer Secret
- command: >
- {{ openshift.common.client_binary }}
- secrets new metrics-deployer
- nothing=/dev/null
- --config={{hawkular_tmp_conf}}
- -n openshift-infra
- register: deployer_create_secret
- failed_when: "'already exists' not in deployer_create_secret.stderr and deployer_create_secret.rc !=0"
- changed_when: deployer_create_secret.rc == 0
+- name: Record kubeconfig tmp dir
+ set_fact:
+ openshift_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-- name: Configure role/user permissions
+- name: Copy the admin client config(s)
command: >
- {{ openshift.common.admin_binary }} {{item}}
- --config={{hawkular_tmp_conf}}
- with_items: "{{hawkular_permission_oc_commands}}"
- register: hawkular_perm_task
- failed_when: "'already exists' not in hawkular_perm_task.stderr and hawkular_perm_task.rc != 0"
- changed_when: hawkular_perm_task.rc == 0
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_metrics_kubeconfig }}
+ changed_when: False
+
+- name: Set hosted metrics facts
+ openshift_facts:
+ role: hosted
+ openshift_env: "{{ hostvars
+ | oo_merge_hostvars(vars, inventory_hostname)
+ | oo_openshift_env }}"
+ openshift_env_structures:
+ - 'openshift.hosted.metrics.*'
+
+- set_fact:
+ # Prefer the master facts over bare variables if present, prefer
+ # metrics_public_url over creating a default using default_subdomain
+ metrics_hostname: "{{ openshift.hosted.metrics.public_url
+ | default('hawkular-metrics.' ~ (openshift.master.default_subdomain
+ | default(openshift_master_default_subdomain)))
+ | oo_hostname_from_url }}"
+ metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
+ metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
+ metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+ cassandra_nodes: "{{ ',CASSANDRA_NODES=' ~ openshift.hosted.metrics.cassandra_nodes if 'cassandra' in openshift.hosted.metrics else '' }}"
+ cassandra_pv_size: "{{ ',CASSANDRA_PV_SIZE=' ~ openshift.hosted.metrics.storage_volume_size if openshift.hosted.metrics.storage_volume_size | default(none) is not none else '' }}"
+ image_prefix: "{{ ',IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer_prefix if 'deployer_prefix' in openshift.hosted.metrics else '' }}"
+ image_version: "{{ ',IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer_version if 'deployer_version' in openshift.hosted.metrics else '' }}"
-- name: Check openshift_master_default_subdomain
- fail:
- msg: "Default subdomain should be defined"
- when: openshift.master.default_subdomain is not defined
-- name: Create Heapster and Hawkular/Cassandra Services
+- name: Check for existing metrics pods
shell: >
- {{ openshift.common.client_binary }} process -f \
- /usr/share/openshift/examples/infrastructure-templates/{{ hawkular_type }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.{{ openshift.master.default_subdomain }},USE_PERSISTENT_STORAGE={{ hawkular_persistence }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} \
- | {{ openshift.common.client_binary }} create -n openshift-infra --config={{hawkular_tmp_conf}} -f -
- register: oex_heapster_services
- failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0"
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get pods -l {{ item }} | grep -q Running
+ register: metrics_pods_status
+ with_items:
+ - metrics-infra=hawkular-metrics
+ - metrics-infra=heapster
+ - metrics-infra=hawkular-cassandra
+ failed_when: false
changed_when: false
-- name: Clean temporary config file
- command: >
- rm -rf {{hawkular_tmp_conf}}
+- name: Check for previous deployer
+ shell: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
+ register: metrics_deployer_status
+ failed_when: false
changed_when: false
-- name: "Wait for image pull and deployer pod"
- shell: "{{ openshift.common.client_binary }} get pods -n openshift-infra | grep metrics-deployer.*Completed"
- register: result
- until: result.rc == 0
- retries: 60
- delay: 10
+- name: Record current deployment status
+ set_fact:
+ greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
+ failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
+ metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
+
+- name: Set deployment mode
+ set_fact:
+ deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
+
+# TODO: handle non greenfield deployments in the future
+- include: install.yml
+ when: greenfield
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
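
The status checks above reduce to two decisions: no previous deployer pod means a greenfield install, and a failed deployer (or a requested upgrade) flips the mode to refresh. In sketch form:

    def deployment_state(deployer_rc, deployer_stdout, upgrade=False):
        greenfield = deployer_rc != 0          # no prior deployer pod found
        failed = 'Error' in deployer_stdout
        mode = 'refresh' if (failed or upgrade) else 'deploy'
        return greenfield, mode
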
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
index 82d9d29f7..0331bcb89 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -2,13 +2,13 @@ hawkular_permission_oc_commands:
- policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
-deployer_service_account:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: metrics-deployer
- secrets:
- - name: metrics-deployer
+metrics_deployer_sa:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: metrics-deployer
+ secrets:
+ - name: metrics-deployer
hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
@@ -17,3 +17,4 @@ hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}tru
hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+metrics_upgrade: "{{ openshift.hosted.metrics.upgrade | default(False) }}"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 9c71af6d9..dc36b542d 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -55,12 +55,6 @@
- name: Install the systemd units
include: systemd_units.yml
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)
- notify:
- - restart node
-
- name: Start and enable openvswitch docker service
service: name=openvswitch.service enabled=yes state=started
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
@@ -89,10 +83,10 @@
create: true
with_items:
- regex: '^AWS_ACCESS_KEY_ID='
- line: "AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}"
+ line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}"
- regex: '^AWS_SECRET_ACCESS_KEY='
- line: "AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}"
- when: "'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws"
+ line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}"
+ when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
notify:
- restart node
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index e384c1bd7..22b539d16 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -17,16 +17,16 @@
persistent: yes
when: ansible_selinux and ansible_selinux.status == "enabled" and virt_use_nfs_output.rc == 0
-- name: Check for existence of virt_sandbox_use_nfs seboolean
+- name: Check for existence of virt_sandbox_use_nfs seboolean (RHEL)
command: getsebool virt_sandbox_use_nfs
register: virt_sandbox_use_nfs_output
- when: ansible_selinux and ansible_selinux.status == "enabled"
+ when: ansible_distribution != "Fedora" and ansible_selinux and ansible_selinux.status == "enabled"
failed_when: false
changed_when: false
-- name: Set seboolean to allow nfs storage plugin access from containers(sandbox)
+- name: Set seboolean to allow nfs storage plugin access from containers(sandbox) (RHEL)
seboolean:
name: virt_sandbox_use_nfs
state: yes
persistent: yes
- when: ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_nfs_output.rc == 0
+ when: ansible_distribution != "Fedora" and ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_nfs_output.rc == 0
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 39e5386d4..025cb567e 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -47,3 +47,9 @@
line: "IMAGE_VERSION={{ openshift_image_tag }}"
notify:
- restart node
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)
+ notify:
+ - restart node
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 414f0d5e3..68d153052 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -20,14 +20,14 @@ masterClientConnectionOverrides:
qps: 100
{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
-{% if openshift.common.use_openshift_sdn | bool and not openshift.common.version_gte_3_3_or_1_3 | bool %}
+{% if openshift.common.use_openshift_sdn | bool %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if ( openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool ) and not openshift.common.version_gte_3_3_or_1_3 | bool%}
+{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index f5b6f501d..3b5865a50 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep b/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/fedora-openshift-enterprise/gpg_keys/.gitkeep
+++ /dev/null
diff --git a/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep b/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/fedora-openshift-enterprise/repos/.gitkeep
+++ /dev/null
diff --git a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
deleted file mode 100644
index bc0435d82..000000000
--- a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
+++ /dev/null
@@ -1,8 +0,0 @@
-[maxamillion-fedora-openshift]
-name=Copr repo for fedora-openshift owned by maxamillion
-baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/
-skip_if_unavailable=True
-gpgcheck=1
-gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg
-enabled=1
-enabled_metadata=1 \ No newline at end of file
diff --git a/roles/openshift_repos/files/online/repos/enterprise-v3.repo b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
deleted file mode 100644
index 92bd35834..000000000
--- a/roles/openshift_repos/files/online/repos/enterprise-v3.repo
+++ /dev/null
@@ -1,10 +0,0 @@
-[enterprise-v3]
-name=OpenShift Enterprise
-baseurl=https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-ose/
- https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-ose/
-enabled=1
-gpgcheck=0
-failovermethod=priority
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
deleted file mode 100644
index b4215679f..000000000
--- a/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
+++ /dev/null
@@ -1,11 +0,0 @@
-[rhel-7-libra-candidate]
-name=rhel-7-libra-candidate - \$basearch
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/
- https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/
-gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted
-skip_if_unavailable=True
-gpgcheck=0
-enabled=1
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-sslverify=False
diff --git a/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep b/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/openshift-enterprise/gpg_keys/.gitkeep
+++ /dev/null
diff --git a/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep b/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/openshift-enterprise/repos/.gitkeep
+++ /dev/null
diff --git a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
+++ /dev/null
diff --git a/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo
deleted file mode 100644
index 0b21e0a65..000000000
--- a/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo
+++ /dev/null
@@ -1,7 +0,0 @@
-[maxamillion-origin-next]
-name=Copr repo for origin-next owned by maxamillion
-baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
-skip_if_unavailable=True
-gpgcheck=1
-gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg
-enabled=1
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
+++ /dev/null
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
+++ /dev/null
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 07a8d28fd..9be168611 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -29,62 +29,20 @@
when: openshift_additional_repos | length == 0 and not openshift.common.is_containerized | bool
notify: refresh cache
-- name: Remove any yum repo files for other deployment types RHEL/CentOS
- file:
- path: "/etc/yum.repos.d/{{ item | basename }}"
- state: absent
- with_fileglob:
- - "fedora-openshift-enterprise/repos/*"
- - "fedora-origin/repos/*"
- - "online/repos/*"
- - "openshift-enterprise/repos/*"
- - "origin/repos/*"
- - "removed/repos/*"
- when: not openshift.common.is_containerized | bool
- and not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
- and (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
- notify: refresh cache
-
-- name: Remove any yum repo files for other deployment types Fedora
- file:
- path: "{{ item | basename }}"
- state: absent
- with_fileglob:
- - "fedora-openshift-enterprise/repos/*"
- - "fedora-origin/repos/*"
- - "online/repos/*"
- - "openshift-enterprise/repos/*"
- - "origin/repos/*"
- - "removed/repos/*"
- when: not openshift.common.is_containerized | bool
- and not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos"))
- and (ansible_distribution == "Fedora")
- notify: refresh cache
-
-- name: Configure gpg keys if needed
+- name: Configure origin gpg keys if needed
copy:
- src: "{{ item }}"
+ src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
dest: /etc/pki/rpm-gpg/
- with_fileglob:
- - "{{ openshift_deployment_type }}/gpg_keys/*"
- notify: refresh cache
- when: not openshift.common.is_containerized | bool
-
-- name: Configure yum repositories RHEL/CentOS
- copy:
- src: "{{ item }}"
- dest: /etc/yum.repos.d/
- with_fileglob:
- - "{{ openshift_deployment_type }}/repos/*"
notify: refresh cache
- when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+ when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
+ and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
-- name: Configure yum repositories Fedora
+- name: Configure origin yum repositories RHEL/CentOS
copy:
- src: "{{ item }}"
+ src: origin/repos/openshift-ansible-centos-paas-sig.repo
dest: /etc/yum.repos.d/
- with_fileglob:
- - "fedora-{{ openshift_deployment_type }}/repos/*"
notify: refresh cache
- when: (ansible_distribution == "Fedora") and not openshift.common.is_containerized | bool
+ when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
+ and openshift_deployment_type == 'origin'
+ and not openshift.common.is_containerized | bool
diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
index 1efab9466..8715fc64e 100644
--- a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
+++ b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
@@ -15,7 +15,7 @@
template:
src: serviceaccount.j2
dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml"
- with_items: openshift_serviceaccounts_names
+ with_items: '{{ openshift_serviceaccounts_names }}'
- name: Get current security context constraints
shell: >
@@ -30,8 +30,8 @@
insertafter: "^users:$"
when: "item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}"
with_nested:
- - openshift_serviceaccounts_names
- - scc_test.results
+ - '{{ openshift_serviceaccounts_names }}'
+ - '{{ scc_test.results }}'
- name: Apply new scc rules for service accounts
command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1"
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index fe7f83cbb..08d0b8540 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -20,21 +20,37 @@
- name: Ensure export directories exist
file:
- path: "{{ openshift.hosted.registry.storage.nfs.directory }}/{{ item }}"
+ path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
state: directory
mode: 0777
owner: nfsnobody
group: nfsnobody
with_items:
- - "{{ openshift.hosted.registry.storage.volume.name }}"
+ - "{{ openshift.hosted.registry }}"
+ - "{{ openshift.hosted.metrics }}"
- name: Configure exports
template:
- dest: /etc/exports
+ dest: /etc/exports.d/openshift-ansible.exports
src: exports.j2
notify:
- restart nfs-server
+# Now that we're putting our exports in our own file, clean up the old ones
+- name: Register exports
+ command: cat /etc/exports.d/openshift-ansible.exports
+ register: exports_out
+
+- name: Remove exports from /etc/exports
+ lineinfile:
+ dest: /etc/exports
+ line: "{{ item }}"
+ state: absent
+ with_items: "{{ exports_out.stdout_lines | default([]) }}"
+ when: exports_out.rc == 0
+ notify:
+ - restart nfs-server
+
- name: Enable and start services
service:
name: "{{ item }}"
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index c1e1994b0..d6d936b72 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1 +1,2 @@
{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
+{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }}
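The two new tasks above migrate the NFS exports: the role now writes to its own drop-in file, /etc/exports.d/openshift-ansible.exports, and then strips any lines it previously managed out of the monolithic /etc/exports. A minimal Python sketch of the same cleanup, assuming the paths from the tasks (the function and its logic are illustrative, not part of the role):

```
import os

NEW_EXPORTS = '/etc/exports.d/openshift-ansible.exports'
OLD_EXPORTS = '/etc/exports'

def migrate_exports():
    # Mirrors the "Register exports" task: if the drop-in file is not
    # there yet, there is nothing to clean up in /etc/exports.
    if not os.path.exists(NEW_EXPORTS):
        return
    with open(NEW_EXPORTS) as f:
        managed = set(line.strip() for line in f if line.strip())
    # Mirrors lineinfile with state=absent: drop each managed line.
    with open(OLD_EXPORTS) as f:
        kept = [line for line in f if line.strip() not in managed]
    with open(OLD_EXPORTS, 'w') as f:
        f.writelines(kept)
```

Guarding on the drop-in file's existence matches the `when: exports_out.rc == 0` condition, so hosts that never had the old layout are left untouched.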
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 6e5d2b22c..a3a99d248 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -73,6 +73,10 @@
msg: openshift_version role was unable to set openshift_pkg_version
when: openshift_pkg_version is not defined
+- fail:
+ msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+ when: not is_containerized | bool and openshift_version == '0.0'
+
# We can't map an openshift_release to a full rpm version like we can with containers, so make sure
# the rpm version we looked up matches the release requested and error out if not.
- fail:
diff --git a/utils/.gitignore b/utils/.gitignore
index 68759c0ba..facfeee54 100644
--- a/utils/.gitignore
+++ b/utils/.gitignore
@@ -43,3 +43,6 @@ coverage.xml
# Sphinx documentation
docs/_build/
+oo-install
+oo-installenv
+cover
diff --git a/utils/Makefile b/utils/Makefile
new file mode 100644
index 000000000..79c27626a
--- /dev/null
+++ b/utils/Makefile
@@ -0,0 +1,83 @@
+########################################################
+
+# Makefile for OpenShift: Atomic Quick Installer
+#
+# useful targets (not all implemented yet!):
+# make clean -- Clean up garbage
+# make ci    -- Execute CI steps (for Travis or Jenkins)
+
+########################################################
+
+# > VARIABLE = value
+#
+# Normal setting of a variable - values within it are recursively
+# expanded when the variable is USED, not when it's declared.
+#
+# > VARIABLE := value
+#
+# Setting of a variable with simple expansion of the values inside -
+# values within it are expanded at DECLARATION time.
+
+########################################################
+
+
+NAME := oo-install
+TESTPACKAGE := oo-install
+SHORTNAME := ooinstall
+
+sdist: clean
+ python setup.py sdist
+ rm -fR $(SHORTNAME).egg-info
+
+clean:
+ @find . -type f -regex ".*\.py[co]$$" -delete
+ @find . -type f \( -name "*~" -or -name "#*" \) -delete
+ @rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
+ @rm -fR $(NAME)env
+
+viewcover:
+ xdg-open cover/index.html
+
+virtualenv:
+ @echo "#############################################"
+ @echo "# Creating a virtualenv"
+ @echo "#############################################"
+ virtualenv $(NAME)env
+ . $(NAME)env/bin/activate && pip install setuptools==17.1.1
+ . $(NAME)env/bin/activate && pip install -r test-requirements.txt
+# If there are any special things to install do it here
+# . $(NAME)env/bin/activate && INSTALL STUFF
+
+ci-unittests:
+ @echo "#############################################"
+ @echo "# Running Unit Tests in virtualenv"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && nosetests -v --with-coverage --cover-html --cover-min-percentage=70 --cover-package=$(SHORTNAME) test/
+ @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
+
+ci-pylint:
+ @echo "#############################################"
+ @echo "# Running PyLint Tests in virtualenv"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py
+
+ci-list-deps:
+ @echo "#############################################"
+ @echo "# Listing all pip deps"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && pip freeze
+
+ci-pyflakes:
+ @echo "#################################################"
+ @echo "# Running Pyflakes Compliance Tests in virtualenv"
+ @echo "#################################################"
+ . $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py
+
+ci-pep8:
+ @echo "#############################################"
+ @echo "# Running PEP8 Compliance Tests in virtualenv"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/
+
+ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests
+ :
diff --git a/utils/README.md b/utils/README.md
new file mode 100644
index 000000000..2abf2705e
--- /dev/null
+++ b/utils/README.md
@@ -0,0 +1,41 @@
+# Running Tests (NEW)
+
+Run the command:
+
+ make ci
+
+to run an array of unittests locally.
+
+You will get errors if the log files already exist and cannot be
+written to by the current user (`/tmp/ansible.log` and
+`/tmp/installer.txt`). *We're working on it.*
+
+# Running From Source
+
+You will need to set up a **virtualenv** to run from source:
+
+ $ virtualenv oo-install
+ $ source ./oo-install/bin/activate
+ $ virtualenv --relocatable ./oo-install/
+ $ python setup.py install
+
+The virtualenv `bin` directory should now be at the start of your
+`$PATH`, and `oo-install` is ready to use from your shell.
+
+You can exit the virtualenv with:
+
+ $ deactivate
+
+# Testing (OLD)
+
+*This section is deprecated, but still works*
+
+First, run the **virtualenv setup steps** described above.
+
+Install some testing libraries (we cannot do this via setuptools due to the setuptools version that virtualenv bundles):
+
+$ pip install mock nose
+
+Then run the tests with:
+
+$ oo-install/bin/nosetests
diff --git a/utils/README.txt b/utils/README.txt
deleted file mode 100644
index 6a6a1d24d..000000000
--- a/utils/README.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-## Running From Source
-
-You will need to setup a virtualenv to run from source and execute the unit tests.
-
-$ virtualenv oo-install
-$ source ./oo-install/bin/activate
-$ virtualenv --relocatable ./oo-install/
-$ python setup.py install
-
-The virtualenv bin directory should now be at the start of your $PATH, and oo-install is ready to use from your shell.
-
-You can exit the virtualenv with:
-
-$ deactivate
-
-## Testing
-
-Install some testing libraries: (we cannot do this via setuptools due to the version virtualenv bundles)
-
-$ pip install mock nose
-
-Then run the tests with:
-
-$ oo-install/bin/nosetests
diff --git a/utils/docs/config.md b/utils/docs/config.md
index 2729f8d37..3677ffe2e 100644
--- a/utils/docs/config.md
+++ b/utils/docs/config.md
@@ -7,31 +7,38 @@ The default location this config file will be written to ~/.config/openshift/ins
## Example
```
-version: v1
+version: v2
variant: openshift-enterprise
-variant_version: 3.0
-ansible_ssh_user: root
-hosts:
-- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- containerized: true
- connect_to: 24.222.0.1
-- ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- node: true
- connect_to: 10.0.0.2
-- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
- connect_to: 10.0.0.3
+variant_version: 3.3
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 24.222.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ containerized: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
```
## Primary Settings
@@ -76,5 +83,3 @@ Defines the user ansible will use to ssh to remote systems for gathering facts a
### ansible_log_path
Default: /tmp/ansible.log
-
-
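The v2 layout above nests hosts, roles, and connection variables under a single `deployment` key and records `version: v2` so the installer can detect pre-v2 files. As a rough sketch of how such a file could be read and sanity-checked (names are illustrative; the real logic lives in oo_config.py further down this diff):

```
import yaml

def load_installer_config(path):
    with open(path) as cfgfile:
        cfg = yaml.safe_load(cfgfile.read())
    if 'version' not in cfg:
        # Pre-v2 files lack a version marker and must be upgraded or removed.
        raise ValueError('Legacy configuration file: {}'.format(path))
    hosts = cfg['deployment']['hosts']   # required in the v2 layout
    roles = cfg['deployment']['roles']   # required in the v2 layout
    return cfg, hosts, roles
```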
diff --git a/utils/src/ooinstall/__init__.py b/utils/src/ooinstall/__init__.py
index 944dea3b5..96e495e19 100644
--- a/utils/src/ooinstall/__init__.py
+++ b/utils/src/ooinstall/__init__.py
@@ -1,5 +1 @@
-# TODO: Temporarily disabled due to importing old code into openshift-ansible
-# repo. We will work on these over time.
# pylint: disable=missing-docstring
-
-from .oo_config import OOConfig
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
index 2537a099f..e51890a22 100644
--- a/utils/src/ooinstall/ansible_plugins/facts_callback.py
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -6,6 +6,7 @@ import os
import yaml
from ansible.plugins.callback import CallbackBase
+
# pylint: disable=super-init-not-called
class CallbackModule(CallbackBase):
@@ -19,9 +20,9 @@ class CallbackModule(CallbackBase):
self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
except KeyError:
raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
- 'variable must be set.')
+ 'variable must be set.')
self.hosts_yaml = os.open(self.hosts_yaml_name, os.O_CREAT |
- os.O_WRONLY)
+ os.O_WRONLY)
def v2_on_any(self, *args, **kwargs):
pass
@@ -72,9 +73,9 @@ class CallbackModule(CallbackBase):
def v2_playbook_on_task_start(self, name, is_conditional):
pass
- #pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
- encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
def v2_playbook_on_setup(self):
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index d677ea8c8..2ba7efe3e 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -5,37 +5,49 @@
import os
import re
import sys
-from distutils.version import LooseVersion
+import logging
import click
+from pkg_resources import parse_version
from ooinstall import openshift_ansible
-from ooinstall import OOConfig
+from ooinstall.oo_config import OOConfig
from ooinstall.oo_config import OOConfigInvalidHostError
from ooinstall.oo_config import Host, Role
from ooinstall.variants import find_variant, get_variant_version_combos
+installer_log = logging.getLogger('installer')
+installer_log.setLevel(logging.CRITICAL)
+installer_file_handler = logging.FileHandler('/tmp/installer.txt')
+installer_file_handler.setFormatter(
+ logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+# Example output:
+# 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts'
+installer_file_handler.setLevel(logging.DEBUG)
+installer_log.addHandler(installer_file_handler)
+
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
UPGRADE_MAPPINGS = {
- '3.0':{
- 'minor_version' :'3.0',
- 'minor_playbook':'v3_0_minor/upgrade.yml',
- 'major_version' :'3.1',
- 'major_playbook':'v3_0_to_v3_1/upgrade.yml',
- },
- '3.1':{
- 'minor_version' :'3.1',
- 'minor_playbook':'v3_1_minor/upgrade.yml',
- 'major_playbook':'v3_1_to_v3_2/upgrade.yml',
- 'major_version' :'3.2',
- },
- '3.2':{
- 'minor_version' :'3.2',
- 'minor_playbook':'v3_2/upgrade.yml',
- 'major_playbook':'v3_2/upgrade.yml',
- 'major_version' :'3.3',
- }
- }
+ '3.0': {
+ 'minor_version': '3.0',
+ 'minor_playbook': 'v3_0_minor/upgrade.yml',
+ 'major_version': '3.1',
+ 'major_playbook': 'v3_0_to_v3_1/upgrade.yml',
+ },
+ '3.1': {
+ 'minor_version': '3.1',
+ 'minor_playbook': 'v3_1_minor/upgrade.yml',
+ 'major_playbook': 'v3_1_to_v3_2/upgrade.yml',
+ 'major_version': '3.2',
+ },
+ '3.2': {
+ 'minor_version': '3.2',
+ 'minor_playbook': 'v3_2/upgrade.yml',
+ 'major_playbook': 'v3_2/upgrade.yml',
+ 'major_version': '3.3',
+ }
+}
+
def validate_ansible_dir(path):
if not path:
@@ -44,6 +56,7 @@ def validate_ansible_dir(path):
# if not os.path.exists(path)):
# raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
+
def is_valid_hostname(hostname):
if not hostname or len(hostname) > 255:
return False
@@ -52,11 +65,13 @@ def is_valid_hostname(hostname):
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
+
def validate_prompt_hostname(hostname):
if hostname == '' or is_valid_hostname(hostname):
return hostname
raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
+
def get_ansible_ssh_user():
click.clear()
message = """
@@ -67,6 +82,7 @@ passwordless sudo access.
click.echo(message)
return click.prompt('User for ssh access', default='root')
+
def get_master_routingconfig_subdomain():
click.clear()
message = """
@@ -75,30 +91,12 @@ You might want to override the default subdomain used for exposed routes. If you
click.echo(message)
return click.prompt('New default subdomain (ENTER for none)', default='')
+
def list_hosts(hosts):
hosts_idx = range(len(hosts))
for idx in hosts_idx:
click.echo(' {}: {}'.format(idx, hosts[idx]))
-def delete_hosts(hosts):
- while True:
- list_hosts(hosts)
- del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
- 'or n/N to add more hosts', default='n')
- try:
- del_idx = int(del_idx)
- hosts.remove(hosts[del_idx])
- except IndexError:
- click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
- except ValueError:
- try:
- response = del_idx.lower()
- if response in ['y', 'n']:
- return hosts, response
- click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
- except AttributeError:
- click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
- return hosts, None
def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
"""
@@ -127,7 +125,7 @@ OpenShift master service to use as the datastore. This can be later replaced
with a separate etcd instance, if required. If multiple masters are specified,
then a separate etcd cluster is configured with each master serving as a member.
-Any masters configured as part of this installation process are also
+Any masters configured as part of this installation process are also
configured as nodes. This enables the master to proxy to pods
from the API. By default, this node is unschedulable, but this can be changed
after installation with the 'oadm manage-node' command.
@@ -175,7 +173,6 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts.append(host)
-
if print_summary:
print_installation_summary(hosts, oo_cfg.settings['variant_version'])
@@ -318,6 +315,7 @@ hostname.
master_lb = Host(**host_props)
hosts.append(master_lb)
+
def collect_storage_host(hosts):
"""
Get a valid host for storage from the user and append it to the list of
@@ -335,8 +333,8 @@ Note: Containerized storage hosts are not currently supported.
first_master = next(host for host in hosts if host.is_master())
hostname_or_ip = click.prompt('Enter hostname or IP address',
- value_proc=validate_prompt_hostname,
- default=first_master.connect_to)
+ value_proc=validate_prompt_hostname,
+ default=first_master.connect_to)
existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
if existing and existing_host.is_node():
existing_host.roles.append('storage')
@@ -347,6 +345,7 @@ Note: Containerized storage hosts are not currently supported.
storage = Host(**host_props)
hosts.append(storage)
+
def is_host_already_node_or_master(hostname, hosts):
is_existing = False
existing_host = None
@@ -358,6 +357,7 @@ def is_host_already_node_or_master(hostname, hosts):
return is_existing, existing_host
+
def confirm_hosts_facts(oo_cfg, callback_facts):
hosts = oo_cfg.deployment.hosts
click.clear()
@@ -432,7 +432,6 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended
return default_facts
-
def check_hosts_config(oo_cfg, unattended):
click.clear()
masters = [host for host in oo_cfg.deployment.hosts if host.is_master()]
@@ -449,7 +448,7 @@ def check_hosts_config(oo_cfg, unattended):
sys.exit(1)
elif len(master_lb) == 1:
if master_lb[0].is_master() or master_lb[0].is_node():
- click.echo('ERROR: The master load balancer is configured as a master or node. ' \
+ click.echo('ERROR: The master load balancer is configured as a master or node. '
'Please correct this.')
sys.exit(1)
else:
@@ -462,8 +461,8 @@ https://docs.openshift.org/latest/install_config/install/advanced_install.html#m
click.echo(message)
sys.exit(1)
- dedicated_nodes = [host for host in oo_cfg.deployment.hosts \
- if host.is_node() and not host.is_master()]
+ dedicated_nodes = [host for host in oo_cfg.deployment.hosts
+ if host.is_node() and not host.is_master()]
if len(dedicated_nodes) == 0:
message = """
WARNING: No dedicated nodes specified. By default, colocated masters have
@@ -477,6 +476,7 @@ as schedulable.
return
+
def get_variant_and_version(multi_master=False):
message = "\nWhich variant would you like to install?\n\n"
@@ -484,7 +484,7 @@ def get_variant_and_version(multi_master=False):
combos = get_variant_version_combos()
for (variant, version) in combos:
message = "%s\n(%s) %s %s" % (message, i, variant.description,
- version.name)
+ version.name)
i = i + 1
message = "%s\n" % message
@@ -496,12 +496,14 @@ def get_variant_and_version(multi_master=False):
return product, version
+
def confirm_continue(message):
if message:
click.echo(message)
click.confirm("Are you ready to continue?", default=False, abort=True)
return
+
def error_if_missing_info(oo_cfg):
missing_info = False
if not oo_cfg.deployment.hosts:
@@ -541,6 +543,7 @@ def error_if_missing_info(oo_cfg):
if missing_info:
sys.exit(1)
+
def get_host_roles_set(oo_cfg):
roles_set = set()
for host in oo_cfg.deployment.hosts:
@@ -549,6 +552,7 @@ def get_host_roles_set(oo_cfg):
return roles_set
+
def get_proxy_hostnames_and_excludes():
message = """
If a proxy is needed to reach HTTP and HTTPS traffic, please enter the name below.
@@ -577,6 +581,7 @@ Please provide any additional hosts to be added to NO_PROXY. (ENTER for none)
return http_proxy_hostname, https_proxy_hostname, proxy_excludes
+
def get_missing_info_from_user(oo_cfg):
""" Prompts the user for any information missing from the given configuration. """
click.clear()
@@ -604,15 +609,15 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
confirm_continue(message)
click.clear()
- if not oo_cfg.settings.get('ansible_ssh_user', ''):
- oo_cfg.deployment.variables['ansible_ssh_user'] = \
- get_ansible_ssh_user()
+ if not oo_cfg.deployment.variables.get('ansible_ssh_user', False):
+ oo_cfg.deployment.variables['ansible_ssh_user'] = get_ansible_ssh_user()
click.clear()
if not oo_cfg.settings.get('variant', ''):
variant, version = get_variant_and_version()
oo_cfg.settings['variant'] = variant.name
oo_cfg.settings['variant_version'] = version.name
+ oo_cfg.settings['variant_subtype'] = version.subtype
click.clear()
if not oo_cfg.deployment.hosts:
@@ -623,13 +628,15 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
oo_cfg.deployment.roles[role] = Role(name=role, variables={})
click.clear()
- if not 'master_routingconfig_subdomain' in oo_cfg.deployment.variables:
- oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
- get_master_routingconfig_subdomain()
+ if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables:
+ oo_cfg.deployment.variables['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
click.clear()
+ current_version = parse_version(
+ oo_cfg.settings.get('variant_version', '0.0'))
+ min_version = parse_version('3.2')
if not oo_cfg.settings.get('openshift_http_proxy', None) and \
- LooseVersion(oo_cfg.settings.get('variant_version', '0.0')) >= LooseVersion('3.2'):
+ current_version >= min_version:
http_proxy, https_proxy, proxy_excludes = get_proxy_hostnames_and_excludes()
oo_cfg.deployment.variables['proxy_http'] = http_proxy
oo_cfg.deployment.variables['proxy_https'] = https_proxy
@@ -647,10 +654,12 @@ def get_role_variable(oo_cfg, role_name, variable_name):
except (StopIteration, KeyError):
return None
+
def set_role_variable(oo_cfg, role_name, variable_name, variable_value):
target_role = next(role for role in oo_cfg.deployment.roles if role.name is role_name)
target_role[variable_name] = variable_value
+
def collect_new_nodes(oo_cfg):
click.clear()
click.echo('*** New Node Configuration ***')
@@ -661,6 +670,7 @@ Add new nodes here
new_nodes, _ = collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
return new_nodes
+
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
uninstalled_hosts = []
@@ -672,13 +682,15 @@ def get_installed_hosts(hosts, callback_facts):
uninstalled_hosts.append(host)
return installed_hosts, uninstalled_hosts
+
def is_installed_host(host, callback_facts):
version_found = 'common' in callback_facts[host.connect_to].keys() and \
- callback_facts[host.connect_to]['common'].get('version', '') and \
- callback_facts[host.connect_to]['common'].get('version', '') != 'None'
+ callback_facts[host.connect_to]['common'].get('version', '') and \
+ callback_facts[host.connect_to]['common'].get('version', '') != 'None'
return version_found
+
# pylint: disable=too-many-branches
# This pylint error will be corrected shortly in a separate PR.
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
@@ -693,10 +705,10 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# This check has to happen before we start removing hosts later in this method
if not force:
if not unattended:
- click.echo('By default the installer only adds new nodes ' \
+ click.echo('By default the installer only adds new nodes '
'to an installed environment.')
- response = click.prompt('Do you want to (1) only add additional nodes or ' \
- '(2) reinstall the existing hosts ' \
+ response = click.prompt('Do you want to (1) only add additional nodes or '
+ '(2) reinstall the existing hosts '
'potentially erasing any custom changes?',
type=int)
# TODO: this should be reworked with error handling.
@@ -725,16 +737,16 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
for uninstalled_host in uninstalled_hosts:
click.echo("{} is currently uninstalled".format(uninstalled_host))
# Fall through
- click.echo('\nUninstalled hosts have been detected in your environment. ' \
- 'Please make sure your environment was installed successfully ' \
- 'before adding new nodes. If you want a fresh install, use ' \
+ click.echo('\nUninstalled hosts have been detected in your environment. '
+ 'Please make sure your environment was installed successfully '
+ 'before adding new nodes. If you want a fresh install, use '
'`atomic-openshift-installer install --force`')
sys.exit(1)
else:
if unattended:
if not force:
- click.echo('Installed environment detected and no additional ' \
- 'nodes specified: aborting. If you want a fresh install, use ' \
+ click.echo('Installed environment detected and no additional '
+ 'nodes specified: aborting. If you want a fresh install, use '
'`atomic-openshift-installer install --force`')
sys.exit(1)
else:
@@ -748,14 +760,15 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
if error or callback_facts is None:
- click.echo("There was a problem fetching the required information. See " \
+ click.echo("There was a problem fetching the required information. See "
"{} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
else:
- pass # proceeding as normal should do a clean install
+ pass # proceeding as normal should do a clean install
return hosts_to_run_on, callback_facts
+
def set_infra_nodes(hosts):
if all(host.is_master() for host in hosts):
infra_list = hosts
@@ -771,11 +784,11 @@ def set_infra_nodes(hosts):
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
- type=click.Path(file_okay=True,
- dir_okay=False,
- writable=True,
- readable=True),
- default=None)
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
@click.option('--ansible-playbook-directory',
'-a',
type=click.Path(exists=True,
@@ -786,24 +799,27 @@ def set_infra_nodes(hosts):
default=DEFAULT_PLAYBOOK_DIR,
envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
- type=click.Path(file_okay=True,
- dir_okay=False,
- writable=True,
- readable=True),
- default=None)
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
@click.option('--ansible-log-path',
- type=click.Path(file_okay=True,
- dir_okay=False,
- writable=True,
- readable=True),
- default="/tmp/ansible.log")
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default="/tmp/ansible.log")
@click.option('-v', '--verbose',
- is_flag=True, default=False)
+ is_flag=True, default=False)
+@click.option('-d', '--debug',
+ help="Enable installer debugging (/tmp/installer.log)",
+ is_flag=True, default=False)
@click.help_option('--help', '-h')
-#pylint: disable=too-many-arguments
-#pylint: disable=line-too-long
+# pylint: disable=too-many-arguments
+# pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose, debug):
"""
atomic-openshift-installer makes the process for installing OSE or AEP
easier by interactively gathering the data needed to run on each host.
@@ -811,6 +827,14 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
"""
+ if debug:
+ # DEFAULT log level threshold is set to CRITICAL (the
+ # highest), anything below that (we only use debug/warning
+ # presently) is not logged. If '-d' is given though, we'll
+ # lower the threshold to debug (almost everything gets through)
+ installer_log.setLevel(logging.DEBUG)
+ installer_log.debug("Quick Installer debugging initialized")
+
ctx.obj = {}
ctx.obj['unattended'] = unattended
ctx.obj['configuration'] = configuration
@@ -838,7 +862,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
if ctx.obj['ansible_config']:
oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
elif 'ansible_config' not in oo_cfg.settings and \
- os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+ os.path.exists(DEFAULT_ANSIBLE_CONFIG):
# If we're installed by RPM this file should exist and we can use it as our default:
oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
@@ -879,7 +903,7 @@ def uninstall(ctx):
@click.option('--latest-minor', '-l', is_flag=True, default=False)
@click.option('--next-major', '-n', is_flag=True, default=False)
@click.pass_context
-#pylint: disable=bad-builtin,too-many-statements
+# pylint: disable=too-many-statements
def upgrade(ctx, latest_minor, next_major):
oo_cfg = ctx.obj['oo_cfg']
@@ -922,7 +946,7 @@ def upgrade(ctx, latest_minor, next_major):
if next_major:
if 'major_playbook' not in mapping:
- click.echo("No major upgrade supported for %s %s with this version "\
+ click.echo("No major upgrade supported for %s %s with this version "
"of atomic-openshift-utils." % (variant, old_version))
sys.exit(0)
playbook = mapping['major_playbook']
@@ -935,7 +959,7 @@ def upgrade(ctx, latest_minor, next_major):
if latest_minor:
if 'minor_playbook' not in mapping:
- click.echo("No minor upgrade supported for %s %s with this version "\
+ click.echo("No minor upgrade supported for %s %s with this version "
"of atomic-openshift-utils." % (variant, old_version))
sys.exit(0)
playbook = mapping['minor_playbook']
@@ -957,7 +981,7 @@ def upgrade(ctx, latest_minor, next_major):
ctx.obj['verbose'])
if retcode > 0:
click.echo("Errors encountered during upgrade, please check %s." %
- oo_cfg.settings['ansible_log_path'])
+ oo_cfg.settings['ansible_log_path'])
else:
oo_cfg.save_to_disk()
click.echo("Upgrade completed! Rebooting all hosts is recommended.")
@@ -982,17 +1006,16 @@ def install(ctx, force, gen_inventory):
print_installation_summary(oo_cfg.deployment.hosts, oo_cfg.settings.get('variant_version', None))
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
- verbose)
+ verbose)
if error or callback_facts is None:
- click.echo("There was a problem fetching the required information. " \
+ click.echo("There was a problem fetching the required information. "
"Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
hosts_to_run_on, callback_facts = get_hosts_to_run_on(
oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
-
# We already verified this is not the case for unattended installs, so this can
# only trigger for live CLI users:
# TODO: if there are *new* nodes and this is a live install, we may need the user
@@ -1038,7 +1061,7 @@ installation process.
The installation was successful!
If this is your first time installing please take a look at the Administrator
-Guide for advanced options related to routing, storage, authentication, and
+Guide for advanced options related to routing, storage, authentication, and
more:
http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
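One behavioral note buried in the reformatting above: version comparisons now go through pkg_resources.parse_version rather than distutils' LooseVersion. A small illustration of the comparison gating the proxy prompt (the assertions are illustrative):

```
from pkg_resources import parse_version

# The proxy questions are only asked for variant_version >= 3.2:
assert parse_version('3.3') >= parse_version('3.2')
assert parse_version('3.10') > parse_version('3.2')      # numeric, not lexicographic
assert not parse_version('0.0') >= parse_version('3.2')  # the fallback default
```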
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 69ad2b4c5..393b36f6f 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -2,9 +2,13 @@
import os
import sys
+import logging
import yaml
from pkg_resources import resource_filename
+
+installer_log = logging.getLogger('installer')
+
CONFIG_PERSIST_SETTINGS = [
'ansible_ssh_user',
'ansible_callback_facts_yaml',
@@ -14,8 +18,9 @@ CONFIG_PERSIST_SETTINGS = [
'deployment',
'version',
'variant',
+ 'variant_subtype',
'variant_version',
- ]
+]
DEPLOYMENT_VARIABLES_BLACKLIST = [
'hosts',
@@ -25,6 +30,17 @@ DEPLOYMENT_VARIABLES_BLACKLIST = [
DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
+
+def print_read_config_error(error, path='the configuration file'):
+ message = """
+Error loading config. {}.
+
+See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file
+for information on creating a configuration file, or delete {} and re-run the installer.
+"""
+ print message.format(error, path)
+
+
class OOConfigFileError(Exception):
"""The provided config file path can't be read/written
"""
@@ -90,7 +106,6 @@ class Host(object):
def is_storage(self):
return 'storage' in self.roles
-
def is_etcd_member(self, all_hosts):
""" Will this host be a member of a standalone etcd cluster. """
if not self.is_master():
@@ -164,30 +179,54 @@ class OOConfig(object):
self._read_config()
self._set_defaults()
-
+ # pylint: disable=too-many-branches
+    # Lots of different checks run in a single method; it could
+    # use a little refactoring love some time
def _read_config(self):
+ installer_log.debug("Attempting to read the OO Config")
try:
+ installer_log.debug("Attempting to see if the provided config file exists: %s", self.config_path)
if os.path.exists(self.config_path):
+ installer_log.debug("We think the config file exists: %s", self.config_path)
with open(self.config_path, 'r') as cfgfile:
loaded_config = yaml.safe_load(cfgfile.read())
- # Use the presence of a Description as an indicator this is
- # a legacy config file:
- if 'Description' in self.settings:
- self._upgrade_legacy_config()
+ if 'version' not in loaded_config:
+ print_read_config_error('Legacy configuration file found', self.config_path)
+ sys.exit(0)
+
+ if loaded_config.get('version', '') == 'v1':
+ loaded_config = self._upgrade_v1_config(loaded_config)
try:
host_list = loaded_config['deployment']['hosts']
role_list = loaded_config['deployment']['roles']
except KeyError as e:
- print "Error loading config, no such key: {}".format(e)
+ print_read_config_error("No such key: {}".format(e), self.config_path)
+ print "Error loading config, required key missing: {}".format(e)
sys.exit(0)
for setting in CONFIG_PERSIST_SETTINGS:
- try:
- self.settings[setting] = str(loaded_config[setting])
- except KeyError:
- continue
+ persisted_value = loaded_config.get(setting)
+ if persisted_value is not None:
+ self.settings[setting] = str(persisted_value)
+
+ # We've loaded any persisted configs, let's verify any
+ # paths which are required for a correct and complete
+ # install
+
+ # - ansible_callback_facts_yaml - Settings from a
+            #   previous run. If the file doesn't exist then we
+ # will just warn about it for now and recollect the
+ # facts.
+ if self.settings.get('ansible_callback_facts_yaml', None) is not None:
+ if not os.path.exists(self.settings['ansible_callback_facts_yaml']):
+ # Cached callback facts file does not exist
+ installer_log.warning("The specified 'ansible_callback_facts_yaml'"
+ "file does not exist (%s)",
+ self.settings['ansible_callback_facts_yaml'])
+ installer_log.debug("Remote system facts will be collected again later")
+ self.settings.pop('ansible_callback_facts_yaml')
for setting in loaded_config['deployment']:
try:
@@ -205,47 +244,67 @@ class OOConfig(object):
for name, variables in role_list.iteritems():
self.deployment.roles.update({name: Role(name, variables)})
-
except IOError, ferr:
raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
ferr.strerror))
except yaml.scanner.ScannerError:
raise OOConfigFileError(
'Config file "{}" is not a valid YAML document'.format(self.config_path))
+ installer_log.debug("Parsed the config file")
+
+ def _upgrade_v1_config(self, config):
+ new_config_data = {}
+ new_config_data['deployment'] = {}
+ new_config_data['deployment']['hosts'] = []
+ new_config_data['deployment']['roles'] = {}
+ new_config_data['deployment']['variables'] = {}
- def _upgrade_legacy_config(self):
- new_hosts = []
- remove_settings = ['validated_facts', 'Description', 'Name',
- 'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
-
- if 'validated_facts' in self.settings:
- for key, value in self.settings['validated_facts'].iteritems():
- value['connect_to'] = key
- if 'masters' in self.settings and key in self.settings['masters']:
- value['master'] = True
- if 'nodes' in self.settings and key in self.settings['nodes']:
- value['node'] = True
- new_hosts.append(value)
- self.settings['hosts'] = new_hosts
-
- for s in remove_settings:
- if s in self.settings:
- del self.settings[s]
-
- # A legacy config implies openshift-enterprise 3.0:
- self.settings['variant'] = 'openshift-enterprise'
- self.settings['variant_version'] = '3.0'
-
- def _upgrade_v1_config(self):
- #TODO write code to upgrade old config
- return
+ role_list = {}
+
+ if config.get('ansible_ssh_user', False):
+ new_config_data['deployment']['ansible_ssh_user'] = config['ansible_ssh_user']
+
+ if config.get('variant', False):
+ new_config_data['variant'] = config['variant']
+
+ if config.get('variant_version', False):
+ new_config_data['variant_version'] = config['variant_version']
+
+ for host in config['hosts']:
+ host_props = {}
+ host_props['roles'] = []
+ host_props['connect_to'] = host['connect_to']
+
+ for prop in ['ip', 'public_ip', 'hostname', 'public_hostname', 'containerized', 'preconfigured']:
+ host_props[prop] = host.get(prop, None)
+
+ for role in ['master', 'node', 'master_lb', 'storage', 'etcd']:
+ if host.get(role, False):
+ host_props['roles'].append(role)
+ role_list[role] = ''
+
+ new_config_data['deployment']['hosts'].append(host_props)
+
+ new_config_data['deployment']['roles'] = role_list
+
+ return new_config_data
def _set_defaults(self):
+ installer_log.debug("Setting defaults, current OOConfig settings: %s", self.settings)
if 'ansible_inventory_directory' not in self.settings:
self.settings['ansible_inventory_directory'] = self._default_ansible_inv_dir()
+
if not os.path.exists(self.settings['ansible_inventory_directory']):
+ installer_log.debug("'ansible_inventory_directory' does not exist, "
+ "creating it now (%s)",
+ self.settings['ansible_inventory_directory'])
os.makedirs(self.settings['ansible_inventory_directory'])
+ else:
+ installer_log.debug("We think this 'ansible_inventory_directory' "
+ "is OK: %s",
+ self.settings['ansible_inventory_directory'])
+
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = \
resource_filename(__name__, 'ansible_plugins')
@@ -253,8 +312,14 @@ class OOConfig(object):
self.settings['version'] = 'v2'
if 'ansible_callback_facts_yaml' not in self.settings:
+ installer_log.debug("No 'ansible_callback_facts_yaml' in self.settings")
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
self.settings['ansible_inventory_directory']
+ installer_log.debug("Value: %s", self.settings['ansible_callback_facts_yaml'])
+ else:
+ installer_log.debug("'ansible_callback_facts_yaml' already set "
+ "in self.settings: %s",
+ self.settings['ansible_callback_facts_yaml'])
if 'ansible_ssh_user' not in self.settings:
self.settings['ansible_ssh_user'] = ''
@@ -262,11 +327,17 @@ class OOConfig(object):
self.settings['ansible_inventory_path'] = \
'{}/hosts'.format(os.path.dirname(self.config_path))
+ # pylint: disable=consider-iterating-dictionary
+ # Disabled because we shouldn't alter the container we're
+ # iterating over
+ #
# clean up any empty sets
for setting in self.settings.keys():
if not self.settings[setting]:
self.settings.pop(setting)
+ installer_log.debug("Updated OOConfig settings: %s", self.settings)
+
def _default_ansible_inv_dir(self):
return os.path.normpath(
os.path.dirname(self.config_path) + "/.ansible")
@@ -329,7 +400,6 @@ class OOConfig(object):
print "Error persisting settings: {}".format(e)
sys.exit(0)
-
return p_settings
def yaml(self):
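`_upgrade_v1_config` replaces the old legacy-upgrade path: boolean role flags on each v1 host (`master: true`, `node: true`) become a `roles` list nested under `deployment`. A condensed sketch of that per-host transformation on a toy dict (illustrative only; the method above also carries over variant settings and accumulates the role list):

```
def upgrade_v1_host(host):
    # v1: {'connect_to': ..., 'master': True, 'node': True, ...}
    # v2: {'connect_to': ..., 'roles': ['master', 'node'], ...}
    new_host = {'connect_to': host['connect_to'], 'roles': []}
    for prop in ['ip', 'public_ip', 'hostname', 'public_hostname',
                 'containerized', 'preconfigured']:
        new_host[prop] = host.get(prop, None)
    for role in ['master', 'node', 'master_lb', 'storage', 'etcd']:
        if host.get(role, False):
            new_host['roles'].append(role)
    return new_host

print(upgrade_v1_host({'connect_to': '24.222.0.1', 'ip': '10.0.0.1',
                       'master': True, 'node': True}))
```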
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index ef7906828..75d26c10a 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -4,9 +4,12 @@ import socket
import subprocess
import sys
import os
+import logging
import yaml
from ooinstall.variants import find_variant
+installer_log = logging.getLogger('installer')
+
CFG = None
ROLES_TO_GROUPS_MAP = {
@@ -20,16 +23,19 @@ ROLES_TO_GROUPS_MAP = {
VARIABLES_MAP = {
'ansible_ssh_user': 'ansible_ssh_user',
'deployment_type': 'deployment_type',
- 'master_routingconfig_subdomain':'openshift_master_default_subdomain',
- 'proxy_http':'openshift_http_proxy',
+ 'variant_subtype': 'deployment_subtype',
+ 'master_routingconfig_subdomain': 'openshift_master_default_subdomain',
+ 'proxy_http': 'openshift_http_proxy',
'proxy_https': 'openshift_https_proxy',
'proxy_exclude_hosts': 'openshift_no_proxy',
}
+
def set_config(cfg):
global CFG
CFG = cfg
+
def generate_inventory(hosts):
global CFG
@@ -48,8 +54,7 @@ def generate_inventory(hosts):
write_inventory_vars(base_inventory, multiple_masters, lb)
-
- #write_inventory_hosts
+ # write_inventory_hosts
for role in CFG.deployment.roles:
# write group block
group = ROLES_TO_GROUPS_MAP.get(role, role)
@@ -68,15 +73,17 @@ def generate_inventory(hosts):
base_inventory.close()
return base_inventory_path
+
def determine_lb_configuration(hosts):
lb = next((host for host in hosts if host.is_master_lb()), None)
if lb:
- if lb.hostname == None:
+ if lb.hostname is None:
lb.hostname = lb.connect_to
lb.public_hostname = lb.connect_to
return lb
+
def write_inventory_children(base_inventory, scaleup):
global CFG
@@ -114,28 +121,30 @@ def write_inventory_vars(base_inventory, multiple_masters, lb):
"openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
if CFG.settings.get('variant_version', None) == '3.1':
- #base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
+ # base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))
write_proxy_settings(base_inventory)
# Find the correct deployment type for ansible:
ver = find_variant(CFG.settings['variant'],
- version=CFG.settings.get('variant_version', None))[1]
+ version=CFG.settings.get('variant_version', None))[1]
base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
+    if getattr(ver, 'subtype', False):
+        base_inventory.write('deployment_subtype={}\n'.format(ver.subtype))
if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
- base_inventory.write('openshift_docker_additional_registries={}\n'
- .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+ base_inventory.write('openshift_docker_additional_registries={}\n'.format(
+ os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
- base_inventory.write('openshift_docker_insecure_registries={}\n'
- .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+ base_inventory.write('openshift_docker_insecure_registries={}\n'.format(
+ os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
# We have to double the '{' here for literals
base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
- "'name': 'ose-devel', "
- "'baseurl': '{}', "
- "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
+ "'name': 'ose-devel', "
+ "'baseurl': '{}', "
+ "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
for name, role_obj in CFG.deployment.roles.iteritems():
if role_obj.variables:
@@ -151,17 +160,17 @@ def write_inventory_vars(base_inventory, multiple_masters, lb):
def write_proxy_settings(base_inventory):
try:
base_inventory.write("openshift_http_proxy={}\n".format(
- CFG.settings['openshift_http_proxy']))
+ CFG.settings['openshift_http_proxy']))
except KeyError:
pass
try:
base_inventory.write("openshift_https_proxy={}\n".format(
- CFG.settings['openshift_https_proxy']))
+ CFG.settings['openshift_https_proxy']))
except KeyError:
pass
try:
base_inventory.write("openshift_no_proxy={}\n".format(
- CFG.settings['openshift_no_proxy']))
+ CFG.settings['openshift_no_proxy']))
except KeyError:
pass
@@ -191,7 +200,6 @@ def write_host(host, role, inventory, schedulable=None):
if role == 'node':
facts += ' openshift_node_labels="{}"'.format(host.node_labels)
-
# Distinguish between three states, no schedulability specified (use default),
# explicitly set to True, or explicitly set to False:
if role != 'node' or schedulable is None:
@@ -216,17 +224,21 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
"""
Retrieves system facts from the remote systems.
"""
+ installer_log.debug("Inside load_system_facts")
FNULL = open(os.devnull, 'w')
args = ['ansible-playbook', '-v'] if verbose \
else ['ansible-playbook']
args.extend([
'--inventory-file={}'.format(inventory_file),
os_facts_path])
+ installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
status = subprocess.call(args, env=env_vars, stdout=FNULL)
- if not status == 0:
+ if status != 0:
+ installer_log.debug("Exit status from subprocess was not 0")
return [], 1
with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file:
+ installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml'])
try:
callback_facts = yaml.safe_load(callback_facts_file)
except yaml.YAMLError, exc:
@@ -239,6 +251,7 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
def default_facts(hosts, verbose=False):
global CFG
+ installer_log.debug("Current global CFG vars here: %s", CFG)
inventory_file = generate_inventory(hosts)
os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
@@ -250,6 +263,9 @@ def default_facts(hosts, verbose=False):
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+
+ installer_log.debug("facts_env: %s", facts_env)
+ installer_log.debug("Going to 'load_system_facts' next")
return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
@@ -280,7 +296,7 @@ def run_ansible(playbook, inventory, env_vars, verbose=False):
def run_uninstall_playbook(hosts, verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
- 'playbooks/adhoc/uninstall.yml')
+ 'playbooks/adhoc/uninstall.yml')
inventory_file = generate_inventory(hosts)
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
@@ -292,7 +308,7 @@ def run_uninstall_playbook(hosts, verbose=False):
def run_upgrade_playbook(hosts, playbook, verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
- 'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook))
+ 'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook))
# TODO: Upgrade inventory for upgrade?
inventory_file = generate_inventory(hosts)
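VARIABLES_MAP drives the inventory generation above: each installer-side setting name is translated to an openshift-ansible inventory variable and written out one line at a time. A small sketch of that translation (the mapping is excerpted from the module; the writer function is illustrative):

```
import sys

# Excerpt of the mapping above (installer setting -> inventory variable):
VARIABLES_MAP = {
    'ansible_ssh_user': 'ansible_ssh_user',
    'variant_subtype': 'deployment_subtype',
    'master_routingconfig_subdomain': 'openshift_master_default_subdomain',
}

def write_vars(settings, out):
    for setting, inventory_var in sorted(VARIABLES_MAP.items()):
        if settings.get(setting):
            out.write('{}={}\n'.format(inventory_var, settings[setting]))

write_vars({'ansible_ssh_user': 'root', 'variant_subtype': 'registry'},
           sys.stdout)
# Prints:
#   ansible_ssh_user=root
#   deployment_subtype=registry
```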
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index b32370cd5..6993794fe 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -11,12 +11,16 @@ to be specified by the user, and to point the generic variants to the latest
version.
"""
+import logging
+installer_log = logging.getLogger('installer')
+
class Version(object):
- def __init__(self, name, ansible_key):
+ def __init__(self, name, ansible_key, subtype=''):
self.name = name # i.e. 3.0, 3.1
self.ansible_key = ansible_key
+ self.subtype = subtype
class Variant(object):
@@ -35,28 +39,35 @@ class Variant(object):
# WARNING: Keep the versions ordered, most recent first:
OSE = Variant('openshift-enterprise', 'OpenShift Container Platform',
- [
- Version('3.3', 'openshift-enterprise'),
- ]
+ [
+ Version('3.3', 'openshift-enterprise'),
+ ]
+)
+
+REG = Variant('openshift-enterprise', 'Registry',
+ [
+ Version('3.3', 'openshift-enterprise', 'registry'),
+ ]
)
origin = Variant('origin', 'OpenShift Origin',
- [
- Version('1.2', 'origin'),
- ]
+ [
+ Version('1.2', 'origin'),
+ ]
)
LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform',
- [
- Version('3.2', 'openshift-enterprise'),
- Version('3.1', 'openshift-enterprise'),
- Version('3.0', 'openshift-enterprise'),
- ]
+ [
+ Version('3.2', 'openshift-enterprise'),
+ Version('3.1', 'openshift-enterprise'),
+ Version('3.0', 'openshift-enterprise'),
+ ]
)
# Ordered list of variants we can install, first is the default.
-SUPPORTED_VARIANTS = (OSE, origin, LEGACY)
-DISPLAY_VARIANTS = (OSE, )
+SUPPORTED_VARIANTS = (OSE, REG, origin, LEGACY)
+DISPLAY_VARIANTS = (OSE, REG,)
+
def find_variant(name, version=None):
"""
@@ -75,6 +86,7 @@ def find_variant(name, version=None):
return (None, None)
+
def get_variant_version_combos():
combos = []
for variant in DISPLAY_VARIANTS:
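The new subtype field is what lets the 'Registry' flavor reuse the openshift-enterprise variant name while still being distinguishable from a full install. A small sketch of how the pieces fit together; the Variant constructor signature is inferred from its call sites above, not shown in this diff:

    class Version(object):
        def __init__(self, name, ansible_key, subtype=''):
            self.name = name              # e.g. '3.3'
            self.ansible_key = ansible_key
            self.subtype = subtype        # '' for a full install, 'registry' for REG

    class Variant(object):
        # Inferred signature: (name, description, versions).
        def __init__(self, name, description, versions):
            self.name = name
            self.description = description
            self.versions = versions

    REG = Variant('openshift-enterprise', 'Registry',
                  [Version('3.3', 'openshift-enterprise', 'registry')])
    assert REG.versions[0].subtype == 'registry'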
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
new file mode 100644
index 000000000..f2216a177
--- /dev/null
+++ b/utils/test-requirements.txt
@@ -0,0 +1,11 @@
+enum
+configparser
+pylint
+pep8
+nose
+coverage
+mock
+flake8
+PyYAML
+click
+backports.functools_lru_cache
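The new file pins the lint and test toolchain for the utils package; presumably it is consumed the usual way (pip install -r utils/test-requirements.txt) before running the pylint, flake8 and nose checks it lists.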
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 0556e52a1..6d9d443ff 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -101,6 +101,7 @@ MOCK_FACTS_QUICKHA = {
# Missing connect_to on some hosts:
BAD_CONFIG = """
variant: %s
+version: v2
deployment:
ansible_ssh_user: root
hosts:
@@ -132,6 +133,7 @@ deployment:
QUICKHA_CONFIG = """
variant: %s
+version: v2
deployment:
ansible_ssh_user: root
hosts:
@@ -189,6 +191,7 @@ deployment:
QUICKHA_2_MASTER_CONFIG = """
variant: %s
+version: v2
deployment:
ansible_ssh_user: root
hosts:
@@ -238,6 +241,7 @@ deployment:
QUICKHA_CONFIG_REUSED_LB = """
variant: %s
+version: v2
deployment:
ansible_ssh_user: root
hosts:
@@ -281,6 +285,7 @@ deployment:
QUICKHA_CONFIG_NO_LB = """
variant: %s
+version: v2
deployment:
ansible_ssh_user: root
hosts:
@@ -323,6 +328,7 @@ deployment:
QUICKHA_CONFIG_PRECONFIGURED_LB = """
variant: %s
+version: v2
deployment:
ansible_ssh_user: root
hosts:
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index b2a0a7134..a883e5c56 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -12,6 +12,7 @@ SAMPLE_CONFIG = """
variant: %s
variant_version: 3.3
master_routingconfig_subdomain: example.com
+version: v2
deployment:
ansible_ssh_user: root
hosts:
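Each fixture gains the same version: v2 marker, which appears to tag the config file schema (note the removal of the legacy-upgrade fixture below) rather than the product version. A quick illustrative check that the key parses as expected, with the body abridged from the fixtures above:

    import yaml

    SAMPLE_V2 = """
    variant: openshift-enterprise
    variant_version: 3.3
    version: v2
    deployment:
      ansible_ssh_user: root
    """

    cfg = yaml.safe_load(SAMPLE_V2)
    assert cfg['version'] == 'v2'
    assert cfg['deployment']['ansible_ssh_user'] == 'root'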
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index f82d55b05..b5068cc14 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -13,6 +13,7 @@ from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
SAMPLE_CONFIG = """
variant: openshift-enterprise
variant_version: 3.3
+version: v2
deployment:
ansible_ssh_user: root
hosts:
@@ -43,27 +44,9 @@ deployment:
node:
"""
-# Used to test automatic upgrading of config:
-LEGACY_CONFIG = """
-Description: This is the configuration file for the OpenShift Ansible-Based Installer.
-Name: OpenShift Ansible-Based Installer Configuration
-Subscription: {type: none}
-Vendor: OpenShift Community
-Version: 0.0.1
-ansible_config: /tmp/notreal/ansible.cfg
-ansible_inventory_directory: /tmp/notreal/.config/openshift/.ansible
-ansible_log_path: /tmp/ansible.log
-ansible_plugins_directory: /tmp/notreal/.python-eggs/ooinstall-3.0.0-py2.7.egg-tmp/ooinstall/ansible_plugins
-masters: [10.0.0.1]
-nodes: [10.0.0.2, 10.0.0.3]
-validated_facts:
- 10.0.0.1: {hostname: master-private.example.com, ip: 10.0.0.1, public_hostname: master.example.com, public_ip: 24.222.0.1}
- 10.0.0.2: {hostname: node1-private.example.com, ip: 10.0.0.2, public_hostname: node1.example.com, public_ip: 24.222.0.2}
- 10.0.0.3: {hostname: node2-private.example.com, ip: 10.0.0.3, public_hostname: node2.example.com, public_ip: 24.222.0.3}
-"""
-
CONFIG_INCOMPLETE_FACTS = """
+version: v2
deployment:
ansible_ssh_user: root
hosts:
@@ -91,6 +74,7 @@ deployment:
CONFIG_BAD = """
variant: openshift-enterprise
+version: v2
deployment:
ansible_ssh_user: root
hosts: