Diffstat (limited to 'roles')
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 13
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 2
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 4
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_ca_server_cert.py | 4
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 133
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 4
-rwxr-xr-x  roles/openshift_health_checker/library/aos_version.py | 214
-rw-r--r--  roles/openshift_health_checker/meta/main.yml | 1
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 15
-rw-r--r--  roles/openshift_health_checker/test/aos_version_test.py | 120
-rw-r--r--  roles/openshift_health_checker/test/conftest.py | 3
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 14
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 35
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 6
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/glusterfs.yml | 51
-rw-r--r--  roles/openshift_logging/vars/openshift-enterprise.yml | 2
-rwxr-xr-x  roles/openshift_metrics/files/import_jks_certs.sh | 52
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 67
-rw-r--r--  roles/openshift_metrics/tasks/import_jks_certs.yaml | 37
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 24
-rw-r--r--  roles/openshift_metrics/templates/heapster.j2 | 18
-rw-r--r--  roles/openshift_metrics/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_node/tasks/main.yml | 27
-rw-r--r--  roles/openshift_node_upgrade/meta/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 27
-rw-r--r--  roles/openshift_provisioners/README.md | 29
-rw-r--r--  roles/openshift_provisioners/defaults/main.yaml | 12
-rw-r--r--  roles/openshift_provisioners/meta/main.yaml | 16
-rw-r--r--  roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml | 19
-rw-r--r--  roles/openshift_provisioners/tasks/generate_secrets.yaml | 14
-rw-r--r--  roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml | 12
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 70
-rw-r--r--  roles/openshift_provisioners/tasks/install_provisioners.yaml | 55
-rw-r--r--  roles/openshift_provisioners/tasks/install_support.yaml | 24
-rw-r--r--  roles/openshift_provisioners/tasks/main.yaml | 27
-rw-r--r--  roles/openshift_provisioners/tasks/oc_apply.yaml | 51
-rw-r--r--  roles/openshift_provisioners/tasks/start_cluster.yaml | 20
-rw-r--r--  roles/openshift_provisioners/tasks/stop_cluster.yaml | 20
-rw-r--r--  roles/openshift_provisioners/tasks/uninstall_provisioners.yaml | 43
-rw-r--r--  roles/openshift_provisioners/templates/clusterrolebinding.j2 | 30
-rw-r--r--  roles/openshift_provisioners/templates/efs.j2 | 58
-rw-r--r--  roles/openshift_provisioners/templates/pv.j2 | 32
-rw-r--r--  roles/openshift_provisioners/templates/pvc.j2 | 26
-rw-r--r--  roles/openshift_provisioners/templates/secret.j2 | 15
-rw-r--r--  roles/openshift_provisioners/templates/serviceaccount.j2 | 16
-rw-r--r--  roles/openshift_repos/README.md | 8
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 34
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 60
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 17
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml | 115
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml | 128
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml | 113
-rw-r--r--  roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py | 23
-rw-r--r--  roles/openshift_storage_glusterfs/meta/main.yml | 15
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 107
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 48
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 41
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 109
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 182
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 | 11
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 | 39
-rw-r--r--  roles/openshift_version/tasks/main.yml | 56
66 files changed, 2302 insertions(+), 287 deletions(-)
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 8bd68787a..0114498f8 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -1,13 +1,16 @@
---
- block:
- - name: Create passthrough route for docker-registry
+
+ # When openshift_hosted_manage_registry=true the openshift_hosted
+ # role will create the appropriate route for the docker-registry.
+ # When openshift_hosted_manage_registry=false then this code will
+ # not be run.
+ - name: fetch the docker-registry route
oc_route:
kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
name: docker-registry
namespace: default
- service_name: docker-registry
- state: present
- tls_termination: passthrough
+ state: list
register: docker_registry_route
- name: Create passthrough route for registry-console
@@ -41,7 +44,7 @@
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
{% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
-p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
- -p REGISTRY_HOST="{{ docker_registry_route.results.results[0].spec.host }}"
+ -p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
-p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
--config={{ openshift_hosted_kubeconfig }}
-n default
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 9151dd0bd..1b5598f46 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -62,7 +62,7 @@ ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
{% endif -%}
#[logging]
-ETCD_DEBUG="{{ etcd_debug | default(false) | string }}"
+ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}"
{% if etcd_log_package_levels is defined %}
ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}"
{% endif %}
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index ae059b549..e4d1b57e6 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf=docker.service
[Service]
EnvironmentFile=/etc/etcd/etcd.conf
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --env-file=/etc/sysconfig/etcd --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 617d8590a..4d083c4d5 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1508,7 +1508,7 @@ class CAServerCert(OpenShiftCLI):
x509output, _ = proc.communicate()
if proc.returncode == 0:
regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
- match = regex.search(x509output) # E501
+ match = regex.search(x509output.decode()) # E501
if not match:
return False
@@ -1558,7 +1558,7 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
if api_rval['returncode'] != 0:
- return {'Failed': True, 'msg': api_rval}
+ return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
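For context on the `x509output.decode()` hunk in this module: under Python 3, `Popen.communicate()` returns `bytes`, and searching bytes with a `str` regex raises a `TypeError`. A minimal standalone sketch of the fixed pattern (the certificate path `cert.crt` is hypothetical, and `openssl` is assumed to be on the PATH):

```python
import re
import subprocess

# Illustrative only: dump a certificate as text and scan for the Subject
# Alternative Name block, mirroring what CAServerCert does above.
proc = subprocess.Popen(['openssl', 'x509', '-noout', '-text', '-in', 'cert.crt'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
x509output, _ = proc.communicate()  # bytes under Python 3

regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
# Searching the raw bytes with this str pattern would raise:
#   TypeError: cannot use a string pattern on a bytes-like object
match = regex.search(x509output.decode())
print(match.group(1) if match else 'no SAN found')
```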
diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
index 018ce8d42..cf99a6584 100644
--- a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
@@ -77,7 +77,7 @@ class CAServerCert(OpenShiftCLI):
x509output, _ = proc.communicate()
if proc.returncode == 0:
regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
- match = regex.search(x509output) # E501
+ match = regex.search(x509output.decode()) # E501
if not match:
return False
@@ -127,7 +127,7 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
if api_rval['returncode'] != 0:
- return {'Failed': True, 'msg': api_rval}
+ return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index df43c3770..107e27f89 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -19,7 +19,6 @@ to be used with an inventory that is representative of the
cluster. For best results run `ansible-playbook` with the `-v` option.
-
# Role Variables
Core variables in this role:
@@ -51,8 +50,8 @@ How to use the Certificate Expiration Checking Role.
Run one of the example playbooks using an inventory file
representative of your existing cluster. Some example playbooks are
-included in this role, or you can read on below after this example to
-craft you own.
+included in this role, or you can [read on below for more examples](#more-example-playbooks)
+to help you craft your own.
```
$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
@@ -69,11 +68,47 @@ Using the `easy-mode.yaml` playbook will produce:
> `/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml`
> instead
+## Run from a container
+
+The example playbooks that use this role are packaged in the
+[container image for openshift-ansible](../../README_CONTAINER_IMAGE.md), so you
+can run any of them by setting the `PLAYBOOK_FILE` environment variable when
+running an openshift-ansible container.
+
+There are several [examples](../../examples/README.md) in the `examples` directory that run certificate check playbooks from a container running on OpenShift.
+
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
> [/playbooks/certificate_expiry/](../../playbooks/certificate_expiry/) directory.
+### Default behavior
+
+This playbook just invokes the certificate expiration check role with default options:
+
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+**From git:**
+```
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+```
+**From openshift-ansible-playbooks rpm:**
+```
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+```
+
+> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
+
+### Easy mode
This example playbook is great if you're just wanting to **try the
role out**. This playbook enables HTML and JSON reports. All
@@ -104,35 +139,70 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/easy-mode.yaml)
-***
+### Easy mode and upload reports to masters
+
+This example builds on top of [easy-mode.yaml](#easy-mode) and additionally
+uploads a copy of the generated reports to the masters, with a timestamp in the
+file names.
+
+This is especially useful when the playbook runs from within a container, because
+the reports are generated inside the container and we need a way to access them.
+Uploading a copy of the reports to the masters is one way to make it easy to
+access them. Alternatively you can use the
+[role variables](#role-variables) that control the path of the generated reports
+to point to a container volume (see the [playbook with custom paths](#generate-html-and-json-reports-in-a-custom-path) for an example).
-Default behavior:
+With the container use case in mind, this playbook allows control over some
+options via environment variables:
+
+ - `CERT_EXPIRY_WARN_DAYS`: sets `openshift_certificate_expiry_warning_days`, overriding the role's default.
+ - `COPY_TO_PATH`: path in the masters where generated reports are uploaded.
```yaml
---
-- name: Check cert expirys
+- name: Generate certificate expiration reports
hosts: nodes:masters:etcd
- become: yes
gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}"
roles:
- role: openshift_certificate_expiry
+
+- name: Upload reports to master
+ hosts: masters
+ gather_facts: no
+ vars:
+ destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ tasks:
+ - name: Create directory in masters
+ file:
+ path: "{{ destination_path }}"
+ state: directory
+ - name: Copy the reports to the masters
+ copy:
+ dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}"
+ src: "/tmp/{{ item }}"
+ with_items:
+ - "cert-expiry-report.html"
+ - "cert-expiry-report.json"
```
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/certificate_expiry/easy-mode-upload.yaml)
-***
-
-
-Generate HTML and JSON artifacts in their default paths:
+### Generate HTML and JSON artifacts in their default paths
```yaml
---
@@ -158,7 +228,38 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_default_paths.yaml)
-***
+### Generate HTML and JSON reports in a custom path
+
+This example customizes the report generation path to point to a specific path (`/var/lib/certcheck`) and uses a date timestamp for the generated files. This allows you to reuse a certain location to keep multiple copies of the reports.
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html"
+ openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json"
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+**From git:**
+```
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_timestamp.yaml
+```
+**From openshift-ansible-playbooks rpm:**
+```
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_timestamp.yaml
+```
+
+> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_timestamp.yaml)
+
+### Long warning window
Change the expiration warning window to 1500 days (good for testing
the module out):
@@ -186,7 +287,7 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/longer_warning_period.yaml)
-***
+### Long warning window and JSON report
Change the expiration warning window to 1500 days (good for testing
the module out) and save the results as a JSON file:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 7edf141e5..adeb85c3f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -2155,6 +2155,10 @@ class OpenShiftFacts(object):
nfs=dict(
directory='/exports',
options='*(rw,root_squash)'),
+ glusterfs=dict(
+ endpoints='glusterfs-registry-endpoints',
+ path='glusterfs-registry-volume',
+ readOnly=False),
host=None,
access=dict(
modes=['ReadWriteMany']
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index 191a4b107..a46589443 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -1,91 +1,199 @@
#!/usr/bin/python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
-Ansible module for determining if multiple versions of an OpenShift package are
-available, and if the version requested is available down to the given
-precision.
-
-Multiple versions available suggest that multiple repos are enabled for the
-different versions, which may cause installation problems.
+Ansible module for yum-based systems determining if multiple releases
+of an OpenShift package are available, and if the release requested
+(if any) is available down to the given precision.
+
+For Enterprise, multiple releases available suggest that multiple repos
+are enabled for the different releases, which may cause installation
+problems. With Origin, however, this is a normal state of affairs as
+all the releases are provided in a single repo with the expectation that
+only the latest can be installed.
+
+Code in the openshift_version role contains a lot of logic to pin down
+the exact package and image version to use and so does some validation
+of release availability already. Without duplicating all that, we would
+like the user to have a helpful error message if we detect things will
+not work out right. Note that if openshift_release is not specified in
+the inventory, the version comparison checks just pass.
+
+TODO: fail gracefully on non-yum systems (dnf in Fedora)
'''
-import yum # pylint: disable=import-error
-
from ansible.module_utils.basic import AnsibleModule
+IMPORT_EXCEPTION = None
+try:
+ import yum # pylint: disable=import-error
+except ImportError as err:
+ IMPORT_EXCEPTION = err # in tox test env, yum import fails
+
-def main(): # pylint: disable=missing-docstring,too-many-branches
+class AosVersionException(Exception):
+ '''Base exception class for package version problems'''
+ def __init__(self, message, problem_pkgs=None):
+ Exception.__init__(self, message)
+ self.problem_pkgs = problem_pkgs
+
+
+def main():
+ '''Entrypoint for this Ansible module'''
module = AnsibleModule(
argument_spec=dict(
- prefix=dict(required=True), # atomic-openshift, origin, ...
- version=dict(required=True),
+ requested_openshift_release=dict(type="str", default=''),
+ openshift_deployment_type=dict(required=True),
+ rpm_prefix=dict(required=True), # atomic-openshift, origin, ...?
),
supports_check_mode=True
)
- def bail(error): # pylint: disable=missing-docstring
- module.fail_json(msg=error)
-
- rpm_prefix = module.params['prefix']
+ if IMPORT_EXCEPTION:
+ module.fail_json(msg="aos_version module could not import yum: %s" % IMPORT_EXCEPTION)
+ # determine the packages we will look for
+ rpm_prefix = module.params['rpm_prefix']
if not rpm_prefix:
- bail("prefix must not be empty")
-
- yb = yum.YumBase() # pylint: disable=invalid-name
- yb.conf.disable_excludes = ["all"] # assume the openshift excluder will be managed, ignore current state
-
- # search for package versions available for aos pkgs
- expected_pkgs = [
+ module.fail_json(msg="rpm_prefix must not be empty")
+ expected_pkgs = set([
rpm_prefix,
rpm_prefix + '-master',
rpm_prefix + '-node',
- ]
+ ])
+
+ # determine what level of precision the user specified for the openshift version.
+ # should look like a version string with possibly many segments e.g. "3.4.1":
+ requested_openshift_release = module.params['requested_openshift_release']
+
+ # get the list of packages available and complain if anything is wrong
+ try:
+ pkgs = _retrieve_available_packages(expected_pkgs)
+ if requested_openshift_release:
+ _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release)
+ _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release)
+ if module.params['openshift_deployment_type'] in ['openshift-enterprise']:
+ _check_multi_minor_release(pkgs, expected_pkgs)
+ except AosVersionException as excinfo:
+ module.fail_json(msg=str(excinfo))
+ module.exit_json(changed=False)
+
+
+def _retrieve_available_packages(expected_pkgs):
+ # search for package versions available for openshift pkgs
+ yb = yum.YumBase() # pylint: disable=invalid-name
+
+ # The openshift excluder prevents unintended updates to openshift
+ # packages by setting yum excludes on those packages. See:
+ # https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates
+ # Excludes are then disabled during an install or upgrade, but
+ # this check will most likely be running outside either. When we
+ # attempt to determine what packages are available via yum they may
+ # be excluded. So, for our purposes here, disable excludes to see
+ # what will really be available during an install or upgrade.
+ yb.conf.disable_excludes = ['all']
+
try:
pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
- except yum.Errors.PackageSackError as e: # pylint: disable=invalid-name
+ except yum.Errors.PackageSackError as excinfo:
# you only hit this if *none* of the packages are available
- bail('Unable to find any OpenShift packages.\nCheck your subscription and repo settings.\n%s' % e)
+ raise AosVersionException('\n'.join([
+ 'Unable to find any OpenShift packages.',
+ 'Check your subscription and repo settings.',
+ str(excinfo),
+ ]))
+ return pkgs
- # determine what level of precision we're expecting for the version
- expected_version = module.params['version']
- if expected_version.startswith('v'): # v3.3 => 3.3
- expected_version = expected_version[1:]
- num_dots = expected_version.count('.')
- pkgs_by_name_version = {}
+class PreciseVersionNotFound(AosVersionException):
+ '''Exception for reporting packages not available at given release'''
+ def __init__(self, requested_release, not_found):
+ msg = ['Not all of the required packages are available at requested version %s:' % requested_release]
+ msg += [' ' + name for name in not_found]
+ msg += ['Please check your subscriptions and enabled repositories.']
+ AosVersionException.__init__(self, '\n'.join(msg), not_found)
+
+
+def _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release):
+ # see if any packages couldn't be found at requested release version
+ # we would like to verify that the required packages are available at whatever precision of release was requested.
+ # e.g. if 3.4.1 is requested and a package version 3.4.1.5 exists, the check passes; if only 3.4.0 exists, it fails.
+
pkgs_precise_version_found = {}
for pkg in pkgs:
- # get expected version precision
- match_version = '.'.join(pkg.version.split('.')[:num_dots + 1])
- if match_version == expected_version:
+ if pkg.name not in expected_pkgs:
+ continue
+ # does the version match, to the precision requested?
+ # and, is it strictly greater, at the precision requested?
+ match_version = '.'.join(pkg.version.split('.')[:requested_openshift_release.count('.') + 1])
+ if match_version == requested_openshift_release:
pkgs_precise_version_found[pkg.name] = True
- # get x.y version precision
- minor_version = '.'.join(pkg.version.split('.')[:2])
- if pkg.name not in pkgs_by_name_version:
- pkgs_by_name_version[pkg.name] = {}
- pkgs_by_name_version[pkg.name][minor_version] = True
- # see if any packages couldn't be found at requested version
- # see if any packages are available in more than one minor version
not_found = []
- multi_found = []
for name in expected_pkgs:
if name not in pkgs_precise_version_found:
not_found.append(name)
+
+ if not_found:
+ raise PreciseVersionNotFound(requested_openshift_release, not_found)
+
+
+class FoundHigherVersion(AosVersionException):
+ '''Exception for reporting that a higher version than requested is available'''
+ def __init__(self, requested_release, higher_found):
+ msg = ['Some required package(s) are available at a version',
+ 'that is higher than requested %s:' % requested_release]
+ msg += [' ' + name for name in higher_found]
+ msg += ['This will prevent installing the version you requested.']
+ msg += ['Please check your enabled repositories or adjust openshift_release.']
+ AosVersionException.__init__(self, '\n'.join(msg), higher_found)
+
+
+def _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release):
+ req_release_arr = [int(segment) for segment in requested_openshift_release.split(".")]
+ # see if any packages are available in a version higher than requested
+ higher_version_for_pkg = {}
+ for pkg in pkgs:
+ if pkg.name not in expected_pkgs:
+ continue
+ version = [int(segment) for segment in pkg.version.split(".")]
+ too_high = version[:len(req_release_arr)] > req_release_arr
+ higher_than_seen = version > higher_version_for_pkg.get(pkg.name, [])
+ if too_high and higher_than_seen:
+ higher_version_for_pkg[pkg.name] = version
+
+ if higher_version_for_pkg:
+ higher_found = []
+ for name, version in higher_version_for_pkg.items():
+ higher_found.append(name + '-' + '.'.join(str(segment) for segment in version))
+ raise FoundHigherVersion(requested_openshift_release, higher_found)
+
+
+class FoundMultiRelease(AosVersionException):
+ '''Exception for reporting multiple minor releases found for same package'''
+ def __init__(self, multi_found):
+ msg = ['Multiple minor versions of these packages are available']
+ msg += [' ' + name for name in multi_found]
+ msg += ["There should only be one OpenShift release repository enabled at a time."]
+ AosVersionException.__init__(self, '\n'.join(msg), multi_found)
+
+
+def _check_multi_minor_release(pkgs, expected_pkgs):
+ # see if any packages are available in more than one minor version
+ pkgs_by_name_version = {}
+ for pkg in pkgs:
+ # keep track of x.y (minor release) versions seen
+ minor_release = '.'.join(pkg.version.split('.')[:2])
+ if pkg.name not in pkgs_by_name_version:
+ pkgs_by_name_version[pkg.name] = {}
+ pkgs_by_name_version[pkg.name][minor_release] = True
+
+ multi_found = []
+ for name in expected_pkgs:
if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
multi_found.append(name)
- if not_found:
- msg = 'Not all of the required packages are available at requested version %s:\n' % expected_version
- for name in not_found:
- msg += ' %s\n' % name
- bail(msg + 'Please check your subscriptions and enabled repositories.')
- if multi_found:
- msg = 'Multiple minor versions of these packages are available\n'
- for name in multi_found:
- msg += ' %s\n' % name
- bail(msg + "There should only be one OpenShift version's repository enabled at a time.")
- module.exit_json(changed=False)
+ if multi_found:
+ raise FoundMultiRelease(multi_found)
if __name__ == '__main__':
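As a small standalone illustration (separate from the module) of the two release checks introduced above, the requested-precision match and the higher-release detection:

```python
# Precision matching, as in _check_precise_version_found(): the requested
# release '3.4' contains one dot, so only the first two segments of each
# package version are compared.
requested = '3.4'
for pkg_version in ['3.4.1.5', '3.5.0']:
    match_version = '.'.join(pkg_version.split('.')[:requested.count('.') + 1])
    print(pkg_version, match_version == requested)  # 3.4.1.5 -> True, 3.5.0 -> False

# Higher-release detection, as in _check_higher_version_found(): versions are
# compared as lists of integers, truncated to the requested precision.
req_release_arr = [int(segment) for segment in requested.split('.')]
version = [int(segment) for segment in '3.5.0'.split('.')]
print(version[:len(req_release_arr)] > req_release_arr)  # True: 3.5 is higher than 3.4
```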
diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml
index 0bbeadd34..cd9b55902 100644
--- a/roles/openshift_health_checker/meta/main.yml
+++ b/roles/openshift_health_checker/meta/main.yml
@@ -1,3 +1,4 @@
---
dependencies:
- role: openshift_facts
+ - role: openshift_repos
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 9891972a6..a7eb720fd 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -9,6 +9,10 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
name = "package_availability"
tags = ["preflight"]
+ @classmethod
+ def is_active(cls, task_vars):
+ return super(PackageAvailability, cls).is_active(task_vars) and task_vars["ansible_pkg_mgr"] == "yum"
+
def run(self, tmp, task_vars):
rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
group_names = get_var(task_vars, "group_names", default=[])
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 42193a1c6..682f6bd40 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -9,12 +9,17 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
name = "package_version"
tags = ["preflight"]
- def run(self, tmp, task_vars):
- rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
- openshift_release = get_var(task_vars, "openshift_release")
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have package requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ master_or_node = 'masters' in group_names or 'nodes' in group_names
+ return super(PackageVersion, cls).is_active(task_vars) and master_or_node
+ def run(self, tmp, task_vars):
args = {
- "prefix": rpm_prefix,
- "version": openshift_release,
+ "requested_openshift_release": get_var(task_vars, "openshift_release", default=''),
+ "openshift_deployment_type": get_var(task_vars, "openshift_deployment_type"),
+ "rpm_prefix": get_var(task_vars, "openshift", "common", "service_type"),
}
return self.execute_module("aos_version", args, tmp, task_vars)
diff --git a/roles/openshift_health_checker/test/aos_version_test.py b/roles/openshift_health_checker/test/aos_version_test.py
new file mode 100644
index 000000000..39c86067a
--- /dev/null
+++ b/roles/openshift_health_checker/test/aos_version_test.py
@@ -0,0 +1,120 @@
+import pytest
+import aos_version
+
+from collections import namedtuple
+Package = namedtuple('Package', ['name', 'version'])
+
+expected_pkgs = set(['spam', 'eggs'])
+
+
+@pytest.mark.parametrize('pkgs, requested_release, expect_not_found', [
+ (
+ [],
+ '3.2.1',
+ expected_pkgs, # none found
+ ),
+ (
+ [Package('spam', '3.2.1')],
+ '3.2',
+ ['eggs'], # completely missing
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
+ '3.2',
+ ['eggs'], # not the right version
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
+ '3.2',
+ [], # all found
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
+ '3.2.1',
+ [], # found with more specific version
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
+ '3.2.1',
+ ['spam'], # eggs found with multiple versions
+ ),
+])
+def test_check_pkgs_for_precise_version(pkgs, requested_release, expect_not_found):
+ if expect_not_found:
+ with pytest.raises(aos_version.PreciseVersionNotFound) as e:
+ aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release)
+ assert set(expect_not_found) == set(e.value.problem_pkgs)
+ else:
+ aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release)
+
+
+@pytest.mark.parametrize('pkgs, requested_release, expect_higher', [
+ (
+ [],
+ '3.2.1',
+ [],
+ ),
+ (
+ [Package('spam', '3.2.1')],
+ '3.2',
+ [], # more precise but not strictly higher
+ ),
+ (
+ [Package('spam', '3.3')],
+ '3.2.1',
+ ['spam-3.3'], # lower precision, but higher
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
+ '3.2',
+ ['eggs-3.3.2'], # one too high
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
+ '3.2.1',
+ ['eggs-3.4'], # multiple versions, one is higher
+ ),
+ (
+ [Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
+ '3.2.1',
+ ['eggs-3.4'], # multiple versions, two are higher
+ ),
+])
+def test_check_pkgs_for_greater_version(pkgs, requested_release, expect_higher):
+ if expect_higher:
+ with pytest.raises(aos_version.FoundHigherVersion) as e:
+ aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release)
+ assert set(expect_higher) == set(e.value.problem_pkgs)
+ else:
+ aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release)
+
+
+@pytest.mark.parametrize('pkgs, expect_to_flag_pkgs', [
+ (
+ [],
+ [],
+ ),
+ (
+ [Package('spam', '3.2.1')],
+ [],
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
+ [],
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('spam', '3.3.2')],
+ ['spam'],
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
+ ['eggs'],
+ ),
+])
+def test_check_pkgs_for_multi_release(pkgs, expect_to_flag_pkgs):
+ if expect_to_flag_pkgs:
+ with pytest.raises(aos_version.FoundMultiRelease) as e:
+ aos_version._check_multi_minor_release(pkgs, expected_pkgs)
+ assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
+ else:
+ aos_version._check_multi_minor_release(pkgs, expected_pkgs)
diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py
index d16401260..3cbd65507 100644
--- a/roles/openshift_health_checker/test/conftest.py
+++ b/roles/openshift_health_checker/test/conftest.py
@@ -6,5 +6,6 @@ import sys
openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__))
sys.path[1:1] = [
openshift_health_checker_path,
- os.path.join(openshift_health_checker_path, 'action_plugins')
+ os.path.join(openshift_health_checker_path, 'action_plugins'),
+ os.path.join(openshift_health_checker_path, 'library'),
]
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 25385339a..f7e916a46 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -3,6 +3,20 @@ import pytest
from openshift_checks.package_availability import PackageAvailability
+@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [
+ ('yum', False, True),
+ ('yum', True, False),
+ ('dnf', True, False),
+ ('dnf', False, False),
+])
+def test_is_active(pkg_mgr, is_containerized, is_active):
+ task_vars = dict(
+ ansible_pkg_mgr=pkg_mgr,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert PackageAvailability.is_active(task_vars=task_vars) == is_active
+
+
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
dict(openshift=dict(common=dict(service_type='openshift'))),
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index cc1d263bc..196d9816a 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,21 +1,46 @@
+import pytest
+
from openshift_checks.package_version import PackageVersion
def test_package_version():
task_vars = dict(
openshift=dict(common=dict(service_type='origin')),
- openshift_release='v3.5',
+ openshift_release='3.5',
+ openshift_deployment_type='origin',
)
return_value = object()
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
assert module_name == 'aos_version'
- assert 'prefix' in module_args
- assert 'version' in module_args
- assert module_args['prefix'] == task_vars['openshift']['common']['service_type']
- assert module_args['version'] == task_vars['openshift_release']
+ assert 'requested_openshift_release' in module_args
+ assert 'openshift_deployment_type' in module_args
+ assert 'rpm_prefix' in module_args
+ assert module_args['requested_openshift_release'] == task_vars['openshift_release']
+ assert module_args['openshift_deployment_type'] == task_vars['openshift_deployment_type']
+ assert module_args['rpm_prefix'] == task_vars['openshift']['common']['service_type']
return return_value
check = PackageVersion(execute_module=execute_module)
result = check.run(tmp=None, task_vars=task_vars)
assert result is return_value
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['etcd'], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert PackageVersion.is_active(task_vars=task_vars) == is_active
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 0b8042473..6e691c26f 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -109,7 +109,7 @@
type: persistentVolumeClaim
claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
when:
- - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+ - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
- name: Create OpenShift registry
oc_adm_registry:
@@ -123,3 +123,7 @@
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
+
+- include: storage/glusterfs.yml
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs'
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
new file mode 100644
index 000000000..b18b24266
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -0,0 +1,51 @@
+---
+- name: Wait for registry pods
+ oc_obj:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ state: list
+ kind: pod
+ selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}"
+ register: registry_pods
+ until:
+ - "registry_pods.results.results[0]['items'] | count > 0"
+ # There must be as many matching pods with 'Ready' status True as there are expected replicas
+ - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+ delay: 10
+ retries: "{{ (600 / 10) | int }}"
+
+- name: Determine registry fsGroup
+ set_fact:
+ openshift_hosted_registry_fsgroup: "{{ registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}"
+
+- name: Create temp mount directory
+ command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Mount registry volume
+ mount:
+ state: mounted
+ fstype: glusterfs
+ src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ name: "{{ mktemp.stdout }}"
+
+- name: Set registry volume permissions
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: directory
+ group: "{{ openshift_hosted_registry_fsgroup }}"
+ mode: "2775"
+ recurse: True
+
+- name: Unmount registry volume
+ mount:
+ state: unmounted
+ name: "{{ mktemp.stdout }}"
+
+- name: Delete temp mount directory
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
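The `until` condition in the "Wait for registry pods" task relies on this repository's `oo_collect` filter; below is a rough Python paraphrase of its stated intent (at least one pod exists, and as many pods report a `Ready` condition of `True` as there are expected replicas), using hypothetical pod data shaped like the API response:

```python
def registry_pods_ready(pods, expected_replicas):
    """Rough paraphrase of the wait condition: at least one pod exists and the
    number of pods with a Ready=True condition equals the expected replicas."""
    ready = 0
    for pod in pods:
        for condition in pod.get('status', {}).get('conditions', []):
            if condition.get('type') == 'Ready' and str(condition.get('status')) == 'True':
                ready += 1
    return len(pods) > 0 and ready == int(expected_replicas)

# Hypothetical data for illustration only:
pods = [
    {'status': {'conditions': [{'type': 'Ready', 'status': 'True'}]}},
    {'status': {'conditions': [{'type': 'Ready', 'status': 'False'}]}},
]
print(registry_pods_ready(pods, 1))  # True: exactly one pod is Ready
```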
diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml
index 9679d209a..92e68a0a3 100644
--- a/roles/openshift_logging/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
+__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('3.6.0') }}"
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
deleted file mode 100755
index f977b6dd6..000000000
--- a/roles/openshift_metrics/files/import_jks_certs.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-set -ex
-
-function import_certs() {
- dir=$CERT_DIR
- hawkular_metrics_keystore_password=$(echo $METRICS_KEYSTORE_PASSWD | base64 --decode)
- hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 --decode)
- hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'`
-
- if [ ! -f $dir/hawkular-metrics.keystore ]; then
- echo "Creating the Hawkular Metrics keystore from the PEM file"
- keytool -importkeystore -v \
- -srckeystore $dir/hawkular-metrics.pkcs12 \
- -destkeystore $dir/hawkular-metrics.keystore \
- -srcstoretype PKCS12 \
- -deststoretype JKS \
- -srcstorepass $hawkular_metrics_keystore_password \
- -deststorepass $hawkular_metrics_keystore_password
- fi
-
- cert_alias_names=(ca metricca)
-
- for cert_alias in ${cert_alias_names[*]}; do
- if [[ ! ${hawkular_alias[*]} =~ "$cert_alias" ]]; then
- echo "Importing the CA Certificate with alias $cert_alias into the Hawkular Metrics Truststore"
- keytool -noprompt -import -v -trustcacerts -alias $cert_alias \
- -file ${dir}/ca.crt \
- -keystore $dir/hawkular-metrics.truststore \
- -trustcacerts \
- -storepass $hawkular_metrics_truststore_password
- fi
- done
-}
-
-import_certs
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 01fc1ef64..07b7eca33 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -13,21 +13,6 @@
hostnames: hawkular-cassandra
changed_when: no
-- slurp: src={{ mktemp.stdout }}/hawkular-metrics-truststore.pwd
- register: hawkular_truststore_password
-
-- stat: path="{{mktemp.stdout}}/{{item}}"
- register: pwd_file_stat
- with_items:
- - hawkular-metrics.pwd
- - hawkular-metrics.htpasswd
- changed_when: no
-
-- set_fact:
- pwd_files: "{{pwd_files | default({}) | combine ({item.item: item.stat}) }}"
- with_items: "{{pwd_file_stat.results}}"
- changed_when: no
-
- name: generate password for hawkular metrics
local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
@@ -47,8 +32,6 @@
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
-- include: import_jks_certs.yaml
-
- name: read files for the hawkular-metrics secret
shell: >
printf '%s: ' '{{ item }}'
@@ -56,13 +39,11 @@
register: hawkular_secrets
with_items:
- ca.crt
- - hawkular-metrics.crt
- - hawkular-metrics.keystore
- - hawkular-metrics-keystore.pwd
- - hawkular-metrics.truststore
- - hawkular-metrics-truststore.pwd
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
+ - hawkular-metrics.crt
+ - hawkular-metrics.key
+ - hawkular-metrics.pem
- hawkular-cassandra.crt
- hawkular-cassandra.key
- hawkular-cassandra.pem
@@ -73,42 +54,23 @@
{{ hawkular_secrets.results|map(attribute='stdout')|join('
')|from_yaml }}
-- name: generate hawkular-metrics-secrets secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_secrets.yaml"
- vars:
- name: hawkular-metrics-secrets
- labels:
- metrics-infra: hawkular-metrics
- data:
- hawkular-metrics.keystore: >
- {{ hawkular_secrets['hawkular-metrics.keystore'] }}
- hawkular-metrics.keystore.password: >
- {{ hawkular_secrets['hawkular-metrics-keystore.pwd'] }}
- hawkular-metrics.truststore: >
- {{ hawkular_secrets['hawkular-metrics.truststore'] }}
- hawkular-metrics.truststore.password: >
- {{ hawkular_secrets['hawkular-metrics-truststore.pwd'] }}
- hawkular-metrics.keystore.alias: "{{ 'hawkular-metrics'|b64encode }}"
- hawkular-metrics.htpasswd.file: >
- {{ hawkular_secrets['hawkular-metrics.htpasswd'] }}
- when: name not in metrics_secrets.stdout_lines
- changed_when: no
-
-- name: generate hawkular-metrics-certificate secret template
+- name: generate hawkular-metrics-certs secret template
template:
src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_certificate.yaml"
+ dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-certs.yaml"
vars:
- name: hawkular-metrics-certificate
+ name: hawkular-metrics-certs
labels:
- metrics-infra: hawkular-metrics
+ metrics-infra: hawkular-metrics-certs
+ annotations:
+ service.alpha.openshift.io/originating-service-name: hawkular-metrics
data:
- hawkular-metrics.certificate: >
+ tls.crt: >
{{ hawkular_secrets['hawkular-metrics.crt'] }}
- hawkular-metrics-ca.certificate: >
- {{ hawkular_secrets['ca.crt'] }}
+ tls.key: >
+ {{ hawkular_secrets['hawkular-metrics.key'] }}
+ tls.truststore.crt: >
+ {{ hawkular_secrets['hawkular-cassandra.crt'] }}
when: name not in metrics_secrets.stdout_lines
changed_when: no
@@ -122,6 +84,7 @@
metrics-infra: hawkular-metrics
data:
hawkular-metrics.username: "{{ 'hawkular'|b64encode }}"
+ hawkular-metrics.htpasswd: "{{ hawkular_secrets['hawkular-metrics.htpasswd'] }}"
hawkular-metrics.password: >
{{ hawkular_secrets['hawkular-metrics.pwd'] }}
when: name not in metrics_secrets.stdout_lines
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
deleted file mode 100644
index e098145e9..000000000
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- stat: path="{{mktemp.stdout}}/hawkular-metrics.keystore"
- register: metrics_keystore
- check_mode: no
-
-- stat: path="{{mktemp.stdout}}/hawkular-metrics.truststore"
- register: metrics_truststore
- check_mode: no
-
-- block:
- - slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd
- register: metrics_keystore_password
-
- - fetch:
- dest: "{{local_tmp.stdout}}/"
- src: "{{ mktemp.stdout }}/{{item}}"
- flat: yes
- changed_when: False
- with_items:
- - hawkular-metrics.pkcs12
- - hawkular-metrics.crt
- - ca.crt
-
- - local_action: command {{role_path}}/files/import_jks_certs.sh
- environment:
- CERT_DIR: "{{local_tmp.stdout}}"
- METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}"
- METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
- changed_when: False
-
- - copy:
- dest: "{{mktemp.stdout}}/"
- src: "{{item}}"
- with_fileglob: "{{local_tmp.stdout}}/*.*store"
-
- when: not metrics_keystore.stat.exists or
- not metrics_truststore.stat.exists
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index c490bcdd3..8d27c4930 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -20,7 +20,7 @@
- set_fact:
heapster_sa_secrets: "{{ heapster_sa_secrets + [item] }}"
with_items:
- - hawkular-metrics-certificate
+ - hawkular-metrics-certs
- hawkular-metrics-account
when: "not {{ openshift_metrics_heapster_standalone | bool }}"
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 361378df3..401db4e58 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -40,24 +40,20 @@ spec:
- "-Dhawkular.metrics.cassandra.nodes=hawkular-cassandra"
- "-Dhawkular.metrics.cassandra.use-ssl"
- "-Dhawkular.metrics.openshift.auth-methods=openshift-oauth,htpasswd"
- - "-Dhawkular.metrics.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.metrics.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd"
- "-Dhawkular.metrics.allowed-cors-access-control-allow-headers=authorization"
- "-Dhawkular.metrics.default-ttl={{openshift_metrics_duration}}"
- "-Dhawkular.metrics.admin-tenant=_hawkular_admin"
- "-Dhawkular-alerts.cassandra-nodes=hawkular-cassandra"
- "-Dhawkular-alerts.cassandra-use-ssl"
- "-Dhawkular.alerts.openshift.auth-methods=openshift-oauth,htpasswd"
- - "-Dhawkular.alerts.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.alerts.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd"
- "-Dhawkular.alerts.allowed-cors-access-control-allow-headers=authorization"
- "-Dorg.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true"
- "-Dorg.apache.catalina.connector.CoyoteAdapter.ALLOW_BACKSLASH=true"
- "-Dcom.datastax.driver.FORCE_NIO=true"
- "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}"
- "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}"
- - "--hmw.keystore=/secrets/hawkular-metrics.keystore"
- - "--hmw.truststore=/secrets/hawkular-metrics.truststore"
- - "--hmw.keystore_password_file=/secrets/hawkular-metrics.keystore.password"
- - "--hmw.truststore_password_file=/secrets/hawkular-metrics.truststore.password"
env:
- name: POD_NAMESPACE
valueFrom:
@@ -67,6 +63,8 @@ spec:
value: "{{ openshift_metrics_master_url }}"
- name: JGROUPS_PASSWORD
value: "{{ 17 | oo_random_word }}"
+ - name: TRUSTSTORE_AUTHORITIES
+ value: "/hawkular-metrics-certs/tls.truststore.crt"
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
@@ -76,10 +74,10 @@ spec:
- name: STARTUP_TIMEOUT
value: "{{ openshift_metrics_startup_timeout }}"
volumeMounts:
- - name: hawkular-metrics-secrets
- mountPath: "/secrets"
- - name: hawkular-metrics-client-secrets
- mountPath: "/client-secrets"
+ - name: hawkular-metrics-certs
+ mountPath: "/hawkular-metrics-certs"
+ - name: hawkular-metrics-account
+ mountPath: "/hawkular-account"
{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
or (openshift_metrics_hawkular_limits_memory is defined and openshift_metrics_hawkular_limits_memory is not none)
or (openshift_metrics_hawkular_requests_cpu is defined and openshift_metrics_hawkular_requests_cpu is not none)
@@ -118,9 +116,9 @@ spec:
command:
- "/opt/hawkular/scripts/hawkular-metrics-liveness.py"
volumes:
- - name: hawkular-metrics-secrets
+ - name: hawkular-metrics-certs
secret:
- secretName: hawkular-metrics-secrets
- - name: hawkular-metrics-client-secrets
+ secretName: hawkular-metrics-certs
+ - name: hawkular-metrics-account
secret:
secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index 7c837db4d..f01ccfd58 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -43,15 +43,15 @@ spec:
- "--wrapper.username_file=/hawkular-account/hawkular-metrics.username"
- "--wrapper.password_file=/hawkular-account/hawkular-metrics.password"
- "--wrapper.endpoint_check=https://hawkular-metrics:443/hawkular/metrics/status"
- - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-cert/hawkular-metrics-ca.certificate&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
+ - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-metrics-certs/tls.crt&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
{% endif %}
env:
- name: STARTUP_TIMEOUT
value: "{{ openshift_metrics_startup_timeout }}"
-{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
+{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
or (openshift_metrics_heapster_limits_memory is defined and openshift_metrics_heapster_limits_memory is not none)
or (openshift_metrics_heapster_requests_cpu is defined and openshift_metrics_heapster_requests_cpu is not none)
- or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
+ or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
%}
resources:
{% if (openshift_metrics_heapster_limits_cpu is not none
@@ -65,8 +65,8 @@ spec:
memory: "{{openshift_metrics_heapster_limits_memory}}"
{% endif %}
{% endif %}
-{% if (openshift_metrics_heapster_requests_cpu is not none
- or openshift_metrics_heapster_requests_memory is not none)
+{% if (openshift_metrics_heapster_requests_cpu is not none
+ or openshift_metrics_heapster_requests_memory is not none)
%}
requests:
{% if openshift_metrics_heapster_requests_cpu is not none %}
@@ -81,8 +81,8 @@ spec:
- name: heapster-secrets
mountPath: "/secrets"
{% if not openshift_metrics_heapster_standalone %}
- - name: hawkular-metrics-certificate
- mountPath: "/hawkular-cert"
+ - name: hawkular-metrics-certs
+ mountPath: "/hawkular-metrics-certs"
- name: hawkular-metrics-account
mountPath: "/hawkular-account"
readinessProbe:
@@ -95,9 +95,9 @@ spec:
secret:
secretName: heapster-secrets
{% if not openshift_metrics_heapster_standalone %}
- - name: hawkular-metrics-certificate
+ - name: hawkular-metrics-certs
secret:
- secretName: hawkular-metrics-certificate
+ secretName: hawkular-metrics-certs
- name: hawkular-metrics-account
secret:
secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index f28c3ce48..b20957550 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
+__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('3.6.0') }}"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 626248306..59003bbf9 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -34,6 +34,33 @@
dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
env_vars: "{{ openshift_node_env_vars | default(None) }}"
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ # command: swapon --summary
+ # Alternate option, however if swap entries are in fstab, swap will be
+ # enabled at boot. Grepping fstab should catch a condition when swap was
+ # disabled, but the fstab entries were not removed.
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+ # Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ lineinfile:
+ dest: /etc/fstab
+ regexp: 'swap'
+ state: absent
+
+ when: swap_result.stdout_lines | length > 0
+ # End Disable Swap Block
+
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
- name: Install Node package
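The comments above name `swapon --summary` as an alternate check. For illustration only (not part of this change), a minimal sketch of that alternative; note it reports only currently active swap:

```
# Sketch: check live swap devices instead of grepping /etc/fstab.
# Unlike the fstab grep, this would miss swap that is configured in fstab
# but not currently enabled.
- name: Check for active swap
  command: swapon --summary
  register: swap_active
  changed_when: false
  failed_when: false

- name: Disable swap
  command: swapoff --all
  when: swap_active.stdout_lines | length > 0
```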
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index cd2f362aa..2a36d8945 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -10,4 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
+- role: lib_utils
- role: openshift_common
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 6ae8dbc12..01bd3bf38 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -84,6 +84,33 @@
value: "{{ oreg_url }}"
when: oreg_url is defined
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ # command: swapon --summary
+  # An alternative check; however, if swap entries remain in fstab, swap will
+  # be enabled again at boot. Grepping fstab also catches the case where swap
+  # was disabled but the fstab entries were not removed.
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+ # Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ lineinfile:
+ dest: /etc/fstab
+ regexp: 'swap'
+ state: absent
+
+ when: swap_result.stdout_lines | length > 0
+ # End Disable Swap Block
+
- name: Restart rpm node service
service:
name: "{{ openshift.common.service_type }}-node"
diff --git a/roles/openshift_provisioners/README.md b/roles/openshift_provisioners/README.md
new file mode 100644
index 000000000..7449073e6
--- /dev/null
+++ b/roles/openshift_provisioners/README.md
@@ -0,0 +1,29 @@
+# OpenShift External Dynamic Provisioners
+
+## Required Vars
+* `openshift_provisioners_install_provisioners`: When `True`, the openshift_provisioners role installs the provisioners whose "master" var (e.g. `openshift_provisioners_efs`) is set to `True`. When `False`, it uninstalls those same provisioners.
+
+## Optional Vars
+* `openshift_provisioners_image_prefix`: The prefix for the provisioner images to use. Defaults to 'docker.io/openshift/origin-'.
+* `openshift_provisioners_image_version`: The image version for the provisioner images to use. Defaults to 'latest'.
+* `openshift_provisioners_project`: The namespace that provisioners will be installed in. Defaults to 'openshift-infra'.
+
+## AWS EFS
+
+### Prerequisites
+* An IAM user assigned the AmazonElasticFileSystemReadOnlyAccess policy (or better)
+* An EFS file system in your cluster's region
+* [Mount targets](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) and [security groups](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs-create-security-groups.html) such that any node (in any zone in the cluster's region) can mount the EFS file system by its [File system DNS name](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html)
+
+### Required Vars
+* `openshift_provisioners_efs_fsid`: The [File system ID](http://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) of the EFS file system, e.g. fs-47a2c22e.
+* `openshift_provisioners_efs_region`: The Amazon EC2 region of the EFS file system.
+* `openshift_provisioners_efs_aws_access_key_id`: The AWS access key of the IAM user, used to check that the EFS file system specified actually exists.
+* `openshift_provisioners_efs_aws_secret_access_key`: The AWS secret access key of the IAM user, used to check that the EFS file system specified actually exists.
+
+### Optional Vars
+* `openshift_provisioners_efs`: When `True` the AWS EFS provisioner will be installed or uninstalled according to whether `openshift_provisioners_install_provisioners` is `True` or `False`, respectively. Defaults to `False`.
+* `openshift_provisioners_efs_path`: The path of the directory in the EFS file system in which the EFS provisioner will create a directory to back each PV it creates. It must exist and be mountable by the EFS provisioner. Defaults to '/persistentvolumes'.
+* `openshift_provisioners_efs_name`: The `provisioner` name that `StorageClasses` specify. Defaults to 'openshift.org/aws-efs'.
+* `openshift_provisioners_efs_nodeselector`: A map of labels (e.g. `{"node":"infra","region":"west"}`) to select the nodes where the pod will land.
+* `openshift_provisioners_efs_supplementalgroup`: The supplemental group to give the pod in case it is needed for permission to write to the EFS file system. Defaults to '65534'.
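As a usage sketch (not part of the committed README), a playbook stanza that enables the EFS provisioner; the host group, file system ID, region, and credential variables below are placeholders:

```
- hosts: masters
  roles:
    - role: openshift_provisioners
      openshift_provisioners_install_provisioners: True
      openshift_provisioners_efs: True
      openshift_provisioners_efs_fsid: fs-47a2c22e                    # placeholder file system ID
      openshift_provisioners_efs_region: us-east-1                    # placeholder region
      openshift_provisioners_efs_aws_access_key_id: "{{ efs_key_id }}"          # assumed vaulted variable
      openshift_provisioners_efs_aws_secret_access_key: "{{ efs_secret_key }}"  # assumed vaulted variable
```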
diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml
new file mode 100644
index 000000000..a6f040831
--- /dev/null
+++ b/roles/openshift_provisioners/defaults/main.yaml
@@ -0,0 +1,12 @@
+---
+openshift_provisioners_install_provisioners: True
+openshift_provisioners_image_prefix: docker.io/openshift/origin-
+openshift_provisioners_image_version: latest
+
+openshift_provisioners_efs: False
+openshift_provisioners_efs_path: /persistentvolumes
+openshift_provisioners_efs_name: openshift.org/aws-efs
+openshift_provisioners_efs_nodeselector: ""
+openshift_provisioners_efs_supplementalgroup: '65534'
+
+openshift_provisioners_project: openshift-infra
diff --git a/roles/openshift_provisioners/meta/main.yaml b/roles/openshift_provisioners/meta/main.yaml
new file mode 100644
index 000000000..cb9278eb7
--- /dev/null
+++ b/roles/openshift_provisioners/meta/main.yaml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Provisioners
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
new file mode 100644
index 000000000..ac21a5e37
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
@@ -0,0 +1,19 @@
+---
+- name: Generate ClusterRoleBindings
+ template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-clusterrolebinding.yaml
+ vars:
+ acct_name: provisioners-{{item}}
+ obj_name: run-provisioners-{{item}}
+ labels:
+ provisioners-infra: support
+ crb_usernames: ["system:serviceaccount:{{openshift_provisioners_project}}:{{acct_name}}"]
+ subjects:
+ - kind: ServiceAccount
+ name: "{{acct_name}}"
+ namespace: "{{openshift_provisioners_project}}"
+ cr_name: "system:persistent-volume-provisioner"
+ with_items:
+ # TODO
+ - efs
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_provisioners/tasks/generate_secrets.yaml b/roles/openshift_provisioners/tasks/generate_secrets.yaml
new file mode 100644
index 000000000..e6cbb1bbf
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_secrets.yaml
@@ -0,0 +1,14 @@
+---
+- name: Generate secret for efs
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-secret.yaml
+ vars:
+ name: efs
+ obj_name: "provisioners-efs"
+ labels:
+ provisioners-infra: support
+ secrets:
+ - {key: aws-access-key-id, value: "{{openshift_provisioners_efs_aws_access_key_id}}"}
+ - {key: aws-secret-access-key, value: "{{openshift_provisioners_efs_aws_secret_access_key}}"}
+ check_mode: no
+ changed_when: no
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..4fe0583ee
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,12 @@
+---
+- name: Generating serviceaccounts
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-sa.yaml
+ vars:
+ obj_name: provisioners-{{item}}
+ labels:
+ provisioners-infra: support
+ with_items:
+ # TODO
+ - efs
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
new file mode 100644
index 000000000..57279c665
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -0,0 +1,70 @@
+---
+- name: Check efs current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
+ -o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}}
+ register: efs_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generate efs PersistentVolumeClaim
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "provisioners-efs"
+ size: "1Mi"
+ access_modes:
+ - "ReadWriteMany"
+ pv_selector:
+ provisioners-efs: efs
+ check_mode: no
+ changed_when: no
+
+- name: Generate efs PersistentVolume
+ template: src=pv.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pv.yaml
+ vars:
+ obj_name: "provisioners-efs"
+ size: "1Mi"
+ access_modes:
+ - "ReadWriteMany"
+ labels:
+ provisioners-efs: efs
+ volume_plugin: "nfs"
+ volume_source:
+ - {key: "server", value: "{{openshift_provisioners_efs_fsid}}.efs.{{openshift_provisioners_efs_region}}.amazonaws.com"}
+ - {key: "path", value: "{{openshift_provisioners_efs_path}}"}
+ claim_name: "provisioners-efs"
+ check_mode: no
+ changed_when: no
+
+- name: Generate efs DeploymentConfig
+ template:
+ src: efs.j2
+ dest: "{{ mktemp.stdout }}/templates/{{deploy_name}}-dc.yaml"
+ vars:
+ name: efs
+ deploy_name: "provisioners-efs"
+ deploy_serviceAccount: "provisioners-efs"
+ replica_count: "{{efs_replica_count.stdout | default(0)}}"
+ node_selector: "{{openshift_provisioners_efs_nodeselector | default('') }}"
+ claim_name: "provisioners-efs"
+ check_mode: no
+ changed_when: false
+
+# anyuid in order to run as root & chgrp shares with allocated gids
+- name: "Check efs anyuid permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/anyuid -o jsonpath='{.users}'
+ register: efs_anyuid
+ check_mode: no
+ changed_when: no
+
+- name: "Set anyuid permissions for efs"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
+ register: efs_output
+ failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr"
+ check_mode: no
+ when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1
diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml
new file mode 100644
index 000000000..324fdcc82
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml
@@ -0,0 +1,55 @@
+---
+- name: Check that EFS File System ID is set
+ fail: msg='the openshift_provisioners_efs_fsid variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_fsid is not defined
+
+- name: Check that EFS region is set
+ fail: msg='the openshift_provisioners_efs_region variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_region is not defined
+
+- name: Check that EFS AWS access key id is set
+ fail: msg='the openshift_provisioners_efs_aws_access_key_id variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_access_key_id is not defined
+
+- name: Check that EFS AWS secret access key is set
+ fail: msg='the openshift_provisioners_efs_aws_secret_access_key variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined
+
+- name: Install support
+ include: install_support.yaml
+
+- name: Install EFS
+ include: install_efs.yaml
+ when: openshift_provisioners_efs | bool
+
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item}}
+ register: object_defs
+ with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
+ changed_when: no
+
+- name: Create objects
+ include: oc_apply.yaml
+ vars:
+ - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ - namespace: "{{ openshift_provisioners_project }}"
+ - file_name: "{{ file.source }}"
+ - file_content: "{{ file.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
+ loop_control:
+ loop_var: file
+ when: not ansible_check_mode
+
+- name: Printing out objects to create
+ debug: msg={{file.content | b64decode }}
+ with_items: "{{ object_defs.results }}"
+ loop_control:
+ loop_var: file
+ when: ansible_check_mode
+
+- name: Scaling up cluster
+ include: start_cluster.yaml
+ when: start_cluster | default(true) | bool
diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml
new file mode 100644
index 000000000..ba472f1c9
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_support.yaml
@@ -0,0 +1,24 @@
+---
+- name: Check whether the provisioners project already exists
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_provisioners_project}} --no-headers
+ register: provisioners_project_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: Create provisioners project
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_provisioners_project}}
+ when: not ansible_check_mode and "not found" in provisioners_project_result.stderr
+
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_secrets.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml
diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml
new file mode 100644
index 000000000..a50c78c97
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -td openshift-provisioners-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift.common.config_base}}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+ tags: provisioners_init
+
+- include: "{{ role_path }}/tasks/install_provisioners.yaml"
+ when: openshift_provisioners_install_provisioners | default(false) | bool
+
+- include: "{{ role_path }}/tasks/uninstall_provisioners.yaml"
+ when: not openshift_provisioners_install_provisioners | default(false) | bool
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ tags: provisioners_cleanup
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
new file mode 100644
index 000000000..49d03f203
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -0,0 +1,51 @@
+---
+- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_init
+ failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
+ changed_when: no
+
+- name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
+
+- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_changed
+ failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''"
+ changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int
+ when:
+ - "'field is immutable' not in generation_apply.stderr"
+
+- name: Removing previous {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ delete -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_delete
+ failed_when: "'error' in generation_delete.stderr"
+ changed_when: generation_delete.rc == 0
+ when: generation_apply.rc != 0
+
+- name: Recreating {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: generation_apply.rc == 0
+ when: generation_apply.rc != 0
diff --git a/roles/openshift_provisioners/tasks/start_cluster.yaml b/roles/openshift_provisioners/tasks/start_cluster.yaml
new file mode 100644
index 000000000..ee7f545a9
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/start_cluster.yaml
@@ -0,0 +1,20 @@
+---
+- name: Retrieve efs
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "provisioners-infra=efs"
+ namespace: "{{openshift_provisioners_project}}"
+ register: efs_dc
+ when: openshift_provisioners_efs | bool
+
+- name: start efs
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_provisioners_project}}"
+ replicas: 1
+ with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/stop_cluster.yaml b/roles/openshift_provisioners/tasks/stop_cluster.yaml
new file mode 100644
index 000000000..30b6b12c8
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/stop_cluster.yaml
@@ -0,0 +1,20 @@
+---
+- name: Retrieve efs
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "provisioners-infra=efs"
+ namespace: "{{openshift_provisioners_project}}"
+ register: efs_dc
+ when: openshift_provisioners_efs | bool
+
+- name: stop efs
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_provisioners_project}}"
+ replicas: 0
+ with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
new file mode 100644
index 000000000..0be4bc7d2
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
@@ -0,0 +1,43 @@
+---
+- name: stop provisioners
+ include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete provisioner api objects
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - dc
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our old secrets
+- name: delete provisioner secrets
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - provisioners-efs
+ ignore_errors: yes
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - run-provisioners-efs
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our service accounts
+- name: delete service accounts
+ oc_serviceaccount:
+ name: "{{ item }}"
+ namespace: "{{ openshift_provisioners_project }}"
+ state: absent
+ with_items:
+ - provisioners-efs
diff --git a/roles/openshift_provisioners/templates/clusterrolebinding.j2 b/roles/openshift_provisioners/templates/clusterrolebinding.j2
new file mode 100644
index 000000000..994afa32d
--- /dev/null
+++ b/roles/openshift_provisioners/templates/clusterrolebinding.j2
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if crb_usernames is defined %}
+userNames:
+{% for name in crb_usernames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+{% if crb_groupnames is defined %}
+groupNames:
+{% for name in crb_groupnames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+ namespace: {{sub.namespace}}
+{% endfor %}
+roleRef:
+ name: {{cr_name}}
diff --git a/roles/openshift_provisioners/templates/efs.j2 b/roles/openshift_provisioners/templates/efs.j2
new file mode 100644
index 000000000..81b9ccca5
--- /dev/null
+++ b/roles/openshift_provisioners/templates/efs.j2
@@ -0,0 +1,58 @@
+kind: DeploymentConfig
+apiVersion: v1
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+spec:
+ replicas: {{replica_count}}
+ selector:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+ spec:
+ serviceAccountName: "{{deploy_serviceAccount}}"
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: efs-provisioner
+ image: {{openshift_provisioners_image_prefix}}efs-provisioner:{{openshift_provisioners_image_version}}
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: provisioners-efs
+ key: aws-access-key-id
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: provisioners-efs
+ key: aws-secret-access-key
+ - name: FILE_SYSTEM_ID
+ value: "{{openshift_provisioners_efs_fsid}}"
+ - name: AWS_REGION
+ value: "{{openshift_provisioners_efs_region}}"
+ - name: PROVISIONER_NAME
+ value: "{{openshift_provisioners_efs_name}}"
+ volumeMounts:
+ - name: pv-volume
+ mountPath: /persistentvolumes
+ securityContext:
+ supplementalGroups:
+ - {{openshift_provisioners_efs_supplementalgroup}}
+ volumes:
+ - name: pv-volume
+ persistentVolumeClaim:
+ claimName: "{{claim_name}}"
diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2
new file mode 100644
index 000000000..f4128f9f0
--- /dev/null
+++ b/roles/openshift_provisioners/templates/pv.j2
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{obj_name}}
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ capacity:
+ storage: {{size}}
+ accessModes:
+{% for mode in access_modes %}
+ - {{mode}}
+{% endfor %}
+ {{volume_plugin}}:
+{% for s in volume_source %}
+ {{s.key}}: {{s.value}}
+{% endfor %}
+{% if claim_name is defined%}
+ claimRef:
+ name: {{claim_name}}
+ namespace: {{openshift_provisioners_project}}
+{% endif %}
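For reference, a sketch of what this template renders when called from install_efs.yaml with its documented vars (the file system ID and region are placeholders):

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: provisioners-efs
  labels:
    provisioners-efs: efs
spec:
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  nfs:
    server: fs-47a2c22e.efs.us-east-1.amazonaws.com
    path: /persistentvolumes
  claimRef:
    name: provisioners-efs
    namespace: openshift-infra
```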
diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2
new file mode 100644
index 000000000..83d503056
--- /dev/null
+++ b/roles/openshift_provisioners/templates/pvc.j2
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+ accessModes:
+{% for mode in access_modes %}
+ - {{mode}}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
+
diff --git a/roles/openshift_provisioners/templates/secret.j2 b/roles/openshift_provisioners/templates/secret.j2
new file mode 100644
index 000000000..78824095b
--- /dev/null
+++ b/roles/openshift_provisioners/templates/secret.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+type: Opaque
+data:
+{% for s in secrets %}
+ "{{s.key}}" : "{{s.value | b64encode}}"
+{% endfor %}
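Similarly, a sketch of the Secret that generate_secrets.yaml produces from this template (the base64 strings encode placeholder values, not real credentials):

```
apiVersion: v1
kind: Secret
metadata:
  name: provisioners-efs
  labels:
    provisioners-infra: support
type: Opaque
data:
  "aws-access-key-id": "QUtJQVBMQUNFSE9MREVS"            # placeholder
  "aws-secret-access-key": "c2VjcmV0UExBQ0VIT0xERVI="    # placeholder
```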
diff --git a/roles/openshift_provisioners/templates/serviceaccount.j2 b/roles/openshift_provisioners/templates/serviceaccount.j2
new file mode 100644
index 000000000..b22acc594
--- /dev/null
+++ b/roles/openshift_provisioners/templates/serviceaccount.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if secrets is defined %}
+secrets:
+{% for name in secrets %}
+- name: {{ name }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index 95b155b29..abd1997dd 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|---------------|----------------------------------------------|
-| openshift_deployment_type | None | Possible values enterprise, origin, online |
-| openshift_additional_repos | {} | TODO |
+| Name | Default value | |
+|-------------------------------|---------------|------------------------------------|
+| openshift_deployment_type | None | Possible values enterprise, origin |
+| openshift_additional_repos | {} | TODO |
Dependencies
------------
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index fc562c42c..f15dc16d1 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,6 +1,18 @@
---
+- name: Abort when conflicting deployment type variables are set
+ when:
+ - deployment_type is defined
+ - openshift_deployment_type is defined
+ - openshift_deployment_type != deployment_type
+ fail:
+ msg: |-
+ openshift_deployment_type is set to "{{ openshift_deployment_type }}".
+ deployment_type is set to "{{ deployment_type }}".
+ To avoid unexpected results, this conflict is not allowed.
+ deployment_type is deprecated in favor of openshift_deployment_type.
+ Please specify only openshift_deployment_type, or make both the same.
+
- name: Standardize on latest variable names
- no_log: True # keep task description legible
set_fact:
# goal is to deprecate deployment_type in favor of openshift_deployment_type.
# both will be accepted for now, but code should refer to the new name.
@@ -8,8 +20,15 @@
deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
+- name: Abort when deployment type is invalid
+ # this variable is required; complain early and clearly if it is invalid.
+ when: openshift_deployment_type not in known_openshift_deployment_types
+ fail:
+ msg: |-
+ Please set openshift_deployment_type to one of:
+ {{ known_openshift_deployment_types | join(', ') }}
+
- name: Normalize openshift_release
- no_log: True # keep task description legible
set_fact:
# Normalize release if provided, e.g. "v3.5" => "3.5"
# Currently this is not required to be defined for all installs, and the
@@ -19,10 +38,11 @@
openshift_release: "{{ openshift_release | string | regex_replace('^v', '') }}"
when: openshift_release is defined
-- name: Ensure a valid deployment type has been given.
- # this variable is required; complain early and clearly if it is invalid.
- when: openshift_deployment_type not in known_openshift_deployment_types
+- name: Abort when openshift_release is invalid
+ when:
+ - openshift_release is defined
+ - not openshift_release | match('\d+(\.\d+){1,3}$')
fail:
msg: |-
- Please set openshift_deployment_type to one of:
- {{ known_openshift_deployment_types | join(', ') }}
+ openshift_release is "{{ openshift_release }}" which is not a valid version string.
+ Please set it to a version string like "3.4".
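As an illustrative sketch (assumed inventory values), settings that satisfy the checks introduced here:

```
# Deployment type must be one of the known types; the release must be a plain
# version string ("v3.5" is normalized to "3.5" before the check runs).
openshift_deployment_type: origin
openshift_release: "3.5"
```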
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
new file mode 100644
index 000000000..cf0fb94c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -0,0 +1,60 @@
+OpenShift GlusterFS Cluster
+===========================
+
+OpenShift GlusterFS Cluster Installation
+
+Requirements
+------------
+
+* Ansible 2.2
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | |
+|--------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
+| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
+| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
+
+Dependencies
+------------
+
+* os_firewall
+* openshift_hosted_facts
+* openshift_repos
+* lib_openshift
+
+Example Playbook
+----------------
+
+```
+- name: Configure GlusterFS hosts
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose A. Rivera (jarrpa@redhat.com)
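As a hedged extension of the example above (not part of the committed README), the role's documented variables could be overridden inline; the namespace and timeout values here are illustrative only:

```
- name: Configure GlusterFS hosts
  hosts: oo_first_master
  roles:
    - role: openshift_storage_glusterfs
      openshift_storage_glusterfs_namespace: glusterfs    # placeholder; defaults to 'default'
      openshift_storage_glusterfs_timeout: 600            # allow more time for pods to become ready
      openshift_storage_glusterfs_wipe: False             # True destroys existing GlusterFS data
```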
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
new file mode 100644
index 000000000..ade850747
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_is_native: True
+openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_wipe: False
+openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_missing: True
+openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_version: 'latest'
+openshift_storage_glusterfs_heketi_admin_key: ''
+openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_topology_load: True
+openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
new file mode 100644
index 000000000..c9945be13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
@@ -0,0 +1,115 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+labels:
+ template: deploy-heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: deploy-heketi
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ name: deploy-heketi
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ name: deploy-heketi
+ glusterfs: deploy-heketi-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-service-account
+ containers:
+ - name: deploy-heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: kubernetes
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+ displayName: GlusterFS container name
+ required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
new file mode 100644
index 000000000..3f8d8f507
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-registry-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
new file mode 100644
index 000000000..c66705752
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
@@ -0,0 +1,128 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs
+ labels:
+ glusterfs: daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs-node: pod
+ template:
+ metadata:
+ name: glusterfs
+ labels:
+ glusterfs-node: pod
+ spec:
+ nodeSelector:
+ storagenode: glusterfs
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 100
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 100
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+ displayName: GlusterFS container name
+ required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml
new file mode 100644
index 000000000..df045c170
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml
@@ -0,0 +1,113 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+labels:
+ template: heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-route
+ spec:
+ to:
+ kind: Service
+ name: heketi
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-pod
+ spec:
+ serviceAccountName: heketi-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: kubernetes
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-storage-endpoints
+ path: heketidbstorage
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+ displayName: GlusterFS container name
+ required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
new file mode 100644
index 000000000..88801e487
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
@@ -0,0 +1,23 @@
+'''
+ Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
+'''
+
+
+def map_from_pairs(source, delim="="):
+    ''' Return a dict parsed from a comma-separated string of delim-separated key/value pairs '''
+ if source == '':
+ return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ ''' OpenShift Storage GlusterFS Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'map_from_pairs': map_from_pairs
+ }
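A brief usage sketch of this filter, mirroring how defaults/main.yml consumes it:

```
# 'storagenode=glusterfs' parses to {'storagenode': 'glusterfs'};
# a multi-pair string such as 'region=infra,zone=east' would yield two keys.
openshift_storage_glusterfs_nodeselector: "{{ 'storagenode=glusterfs' | map_from_pairs }}"
```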
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
new file mode 100644
index 000000000..aab9851f9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jose A. Rivera
+ description: OpenShift GlusterFS Cluster
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: openshift_hosted_facts
+- role: openshift_repos
+- role: lib_openshift
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
new file mode 100644
index 000000000..26ca5eebf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -0,0 +1,107 @@
+---
+- assert:
+ that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+    msg: Only one GlusterFS nodeselector key/value pair should be provided
+
+- assert:
+ that: "groups.oo_glusterfs_to_config | count >= 3"
+ msg: There must be at least three GlusterFS nodes specified
+
+- name: Delete pre-existing GlusterFS resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "template,daemonset"
+ name: glusterfs
+ state: absent
+ when: openshift_storage_glusterfs_wipe
+
+- name: Unlabel any existing GlusterFS nodes
+ oc_label:
+ name: "{{ item }}"
+ kind: node
+ state: absent
+ labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.all }}"
+ when: openshift_storage_glusterfs_wipe
+
+- name: Delete pre-existing GlusterFS config
+ file:
+ path: /var/lib/glusterd
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.oo_glusterfs_to_config }}"
+ when: openshift_storage_glusterfs_wipe
+
+- name: Get GlusterFS storage devices state
+ command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+ register: devices_info
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.oo_glusterfs_to_config }}"
+ failed_when: False
+ when: openshift_storage_glusterfs_wipe
+
+ # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ delegate_to: "{{ item.item }}"
+ with_items: "{{ devices_info.results }}"
+ when:
+ - openshift_storage_glusterfs_wipe
+ - item.stdout_lines | count > 0
+
+- name: Add service accounts to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ with_items:
+ - 'default'
+ - 'router'
+
+- name: Label GlusterFS nodes
+ oc_label:
+ name: "{{ glusterfs_host }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.oo_glusterfs_to_config }}"
+ loop_control:
+ loop_var: glusterfs_host
+
+- name: Copy GlusterFS DaemonSet template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Create GlusterFS template
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: glusterfs
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Deploy GlusterFS pods
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "glusterfs"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+
+- name: Wait for GlusterFS pods
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs-node=pod"
+ register: glusterfs_pods
+ until:
+ - "glusterfs_pods.results.results[0]['items'] | count > 0"
+  # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
+ - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
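These tasks assume an `oo_glusterfs_to_config` group of at least three hosts, each carrying a `glusterfs_devices` variable; a minimal inventory sketch with placeholder hostnames and device paths:

```
all:
  children:
    oo_glusterfs_to_config:
      hosts:
        node1.example.com:
          glusterfs_devices: ['/dev/sdb']
        node2.example.com:
          glusterfs_devices: ['/dev/sdb']
        node3.example.com:
          glusterfs_devices: ['/dev/sdb']
```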
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
new file mode 100644
index 000000000..9f092d5d5
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -0,0 +1,48 @@
+---
+- name: Delete pre-existing GlusterFS registry resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "svc,ep"
+ name: "glusterfs-registry-endpoints"
+ failed_when: False
+
+- name: Generate GlusterFS registry endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Copy GlusterFS registry service
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry endpoints
+ oc_obj:
+ namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ state: present
+ kind: endpoints
+ name: glusterfs-registry-endpoints
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Create GlusterFS registry service
+ oc_obj:
+ namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ state: present
+ kind: service
+ name: glusterfs-registry-endpoints
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Check if GlusterFS registry volume exists
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+ register: registry_volume
+
+- name: Create GlusterFS registry volume
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
new file mode 100644
index 000000000..76ae1db75
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -0,0 +1,41 @@
+---
+- name: Copy initial heketi resource files
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "deploy-heketi-template.yml"
+
+- name: Create deploy-heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: deploy-heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/deploy-heketi-template.yml"
+
+- name: Deploy deploy-heketi pod
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "deploy-heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for deploy-heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
new file mode 100644
index 000000000..84b85e95d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -0,0 +1,109 @@
+---
+- name: Create heketi DB volume
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ register: setup_storage
+ failed_when: False
+
+# This is used in the subsequent task
+- name: Copy the admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
+ when: "setup_storage.rc == 0"
+
+- name: Wait for copy job to finish
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: job
+ state: list
+ name: "heketi-storage-copy-job"
+ register: heketi_job
+ until:
+ - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+ # Pod's 'Complete' status must be True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ failed_when:
+ - "'results' in heketi_job.results"
+ - "heketi_job.results.results | count > 0"
+ # Fail when pod's 'Failed' status is True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+ when: "setup_storage.rc == 0"
+
+- name: Delete deploy resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ failed_when: False
+
+- name: Copy heketi template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
+ dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs=heketi-service"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Set heketi URL
+ set_fact:
+ openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
new file mode 100644
index 000000000..265a3cc6e
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -0,0 +1,182 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Ensure the target namespace exists
+ oc_project:
+ state: present
+ name: "{{ openshift_storage_glusterfs_namespace }}"
+ when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+ when: openshift_storage_glusterfs_is_native
+
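+# heketi-cli is used further down in this file to load the topology and to verify the heketi service.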
+- name: Make sure heketi-client is installed
+ package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ - kind: "template,route,dc,service"
+ name: "heketi"
+ - kind: "svc,ep"
+ name: "heketi-storage-endpoints"
+ - kind: "sa"
+ name: "heketi-service-account"
+ failed_when: False
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Create heketi service account
+ oc_serviceaccount:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ name: heketi-service-account
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+ resource_kind: role
+ resource_name: edit
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ when: openshift_storage_glusterfs_heketi_is_native
+
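+# Assumes the role defaults openshift_storage_glusterfs_heketi_deploy_is_missing to True;
+# it is only flipped to False here when a Ready deploy-heketi pod is already running.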
+- name: Check whether deploy-heketi needs to be deployed
+ set_fact:
+ openshift_storage_glusterfs_heketi_deploy_is_missing: False
+ when:
+ - "openshift_storage_glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check whether heketi needs to be deployed
+ set_fact:
+ openshift_storage_glusterfs_heketi_is_missing: False
+ when:
+ - "openshift_storage_glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_deploy_is_missing
+ - openshift_storage_glusterfs_heketi_is_missing
+
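+# The endpoints may belong to either the temporary deploy-heketi service or the final
+# heketi service, so the selector matches both labels.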
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+ set_fact:
+ openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
+
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - openshift_storage_glusterfs_is_native
+ - openshift_storage_glusterfs_heketi_topology_load
+
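+# heketi-cli may exit 0 even when it fails to add nodes or devices, so the output is
+# also scanned for 'Unable' below.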
+- name: Load heketi topology
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ register: topology_load
+ failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+ when:
+ - openshift_storage_glusterfs_is_native
+ - openshift_storage_glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+ when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+
+- include: glusterfs_registry.yml
+ when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..d72d085c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-registry-endpoints
+subsets:
+- addresses:
+{% for node in groups.oo_glusterfs_to_config %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
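+ # The Endpoints API requires a port, but GlusterFS does not use it, so a dummy value of 1 is given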
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
new file mode 100644
index 000000000..eb5b4544f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
@@ -0,0 +1,39 @@
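+{#
+  Builds the heketi topology from the hosts in the oo_glusterfs_to_config group.
+  As a hypothetical illustration, two hosts in the default cluster, each with
+  glusterfs_devices=['/dev/xvdc'], would render roughly as:
+    { "clusters": [ { "nodes": [
+        { "node": { "hostnames": { "manage": [ "node1.example.com" ],
+                                   "storage": [ "192.168.10.14" ] },
+                    "zone": 1 },
+          "devices": [ "/dev/xvdc" ] },
+        { ... second node ... } ] } ] }
+#}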
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in groups.oo_glusterfs_to_config -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+ "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}"
+ ],
+ "storage": [
+ "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c3d001bb4..fa9b20e92 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -7,8 +7,13 @@
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
-- fail:
- msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+ fail:
+ msg: |-
+ To install a containerized Origin release, you must set openshift_release or
+ openshift_image_tag in your inventory to specify which version of the OpenShift
+ component images to use. You may want the latest (usually alpha) release or a
+ more stable one. (Suggestion: add openshift_release="x.y" to your inventory.)
when:
- is_containerized | bool
- openshift.common.deployment_type == 'origin'
@@ -27,7 +32,10 @@
when: openshift_release is defined
# Verify that the image tag is in a valid format
-- block:
+- when:
+ - openshift_image_tag is defined
+ - openshift_image_tag != "latest"
+ block:
# Verifies that when the deployment type is origin the version:
# - starts with a v
@@ -35,12 +43,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers, letters, dashes and dots.
- - name: Verify Origin openshift_image_tag is valid
+ - name: (Origin) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'origin'
assert:
that:
- "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
- when: openshift.common.deployment_type == 'origin'
+ msg: |-
+ openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Verifies that when the deployment type is openshift-enterprise the version:
# - starts with a v
@@ -48,16 +58,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers
- - name: Verify Enterprise openshift_image_tag is valid
+ - name: (Enterprise) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'openshift-enterprise'
assert:
that:
- "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
- when: openshift.common.deployment_type == 'openshift-enterprise'
-
- when:
- - openshift_image_tag is defined
- - openshift_image_tag != "latest"
+ msg: |-
+ openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Make sure we copy this to a fact if given a var:
- set_fact:
@@ -119,30 +127,42 @@
- fail:
msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
when: openshift_version is not defined
- fail:
msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
when: openshift_image_tag is not defined
- fail:
msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
when: openshift_pkg_version is not defined
- fail:
- msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if no OpenShift RPM version could be detected
when:
- not is_containerized | bool
- openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers, make sure
+# We can't map an openshift_release to full rpm version like we can with containers; make sure
# the rpm version we looked up matches the release requested and error out if not.
-- fail:
- msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
+- name: For an RPM install, abort when the requested release does not match the available version
when:
- not is_containerized | bool
- openshift_release is defined
- - not openshift_version.startswith(openshift_release) | bool
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which does not match the
+ latest OpenShift RPM we detected, {{ openshift.common.service_type }}-{{ openshift_version }},
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
# The end result of these three variables is quite important so make sure they are displayed and logged:
- debug: var=openshift_release