444 files changed, 6165 insertions, 4853 deletions
diff --git a/.redhat-ci.sh b/.redhat-ci.sh new file mode 100755 index 000000000..fce8c1d52 --- /dev/null +++ b/.redhat-ci.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -xeuo pipefail + +pip install -r requirements.txt + +# ping the nodes to check they're responding and register their ostree versions +ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status' + +upload_journals() { + mkdir journals + for node in master node1 node2; do + ssh ocp-$node 'journalctl --no-pager || true' > journals/ocp-$node.log + done +} + +trap upload_journals ERR + +# run the actual installer +ansible-playbook -vvv -i .redhat-ci.inventory playbooks/byo/config.yml + +# run a small subset of origin conformance tests to sanity +# check the cluster NB: we run it on the master since we may +# be in a different OSP network +ssh ocp-master docker run --rm --net=host --privileged \ + -v /etc/origin/master/admin.kubeconfig:/config fedora:25 sh -c \ + '"dnf install -y origin-tests && \ + KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1 \ + --ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""' diff --git a/.redhat-ci.yml b/.redhat-ci.yml index d9849ed60..6dac7b256 100644 --- a/.redhat-ci.yml +++ b/.redhat-ci.yml @@ -18,28 +18,13 @@ packages: - openssl-devel - redhat-rpm-config -context: 'fedora/25/atomic | origin/v1.5.0-rc.0' +context: 'fedora/25/atomic | origin/v3.6.0-alpha.1' env: - OPENSHIFT_IMAGE_TAG: v1.5.0-rc.0 + OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1 tests: - - pip install ansible==2.2.2.0 # F25 currently has 2.2.1, so install from pypi - - ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status' - - ansible-playbook -vvv -i .redhat-ci.inventory playbooks/byo/config.yml - # run a small subset of origin conformance tests to sanity check the cluster - # NB: we run it on the master since we may be in a different OSP network - - ssh ocp-master docker run --rm --net=host --privileged - -v /etc/origin/master/admin.kubeconfig:/config fedora:25 sh -c - '"dnf install -y origin-tests && - KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1 - --ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""' + - ./.redhat-ci.sh ---- - -inherit: true - -context: 'fedora/25/atomic | origin/v3.6.0-alpha.0' - -env: - OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.0 +artifacts: + - journals/ diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 88bdb40a1..d9999ac9f 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.6.30-1 ./ +3.6.67-1 ./ diff --git a/.travis.yml b/.travis.yml index 245202139..1c549cec9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,7 @@ python: - "3.5" install: + - pip install --upgrade pip - pip install tox-travis coveralls script: @@ -1,12 +1,11 @@ -# openshift-ansible RPM Build instructions +# openshift-ansible build instructions + +## Build openshift-ansible RPMs We use tito to make building and tracking revisions easy. For more information on tito, please see the [Tito home page](https://github.com/dgoodwin/tito "Tito home page"). - -## Build openshift-ansible - - Change into openshift-ansible ``` cd openshift-ansible @@ -24,3 +23,41 @@ tito tag ``` tito build --rpm ``` + +## Build an openshift-ansible container image + +To build a container image of `openshift-ansible` using standalone **Docker**: + + cd openshift-ansible + docker build -t openshift/openshift-ansible . 
+ +Alternatively this can be built using on **OpenShift** using a [build and image stream](https://docs.openshift.org/latest/architecture/core_concepts/builds_and_image_streams.html) with this command: + + oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible + +The progress of the build can be monitored with: + + oc logs -f bc/openshift-ansible + +Once built, the image will be visible in the Image Stream created by the same command: + + oc describe imagestream openshift-ansible + +## Build the Atomic System Container + +A system container runs using runC instead of Docker and it is managed +by the [atomic](https://github.com/projectatomic/atomic/) tool. As it +doesn't require Docker to run, the installer can run on a node of the +cluster without interfering with the Docker daemon that is configured +by the installer itself. + +The first step is to build the [container image](#build-an-openshift-ansible-container-image) +as described before. The container image already contains all the +required files to run as a system container. + +Once the container image is built, we can import it into the OSTree +storage: + +``` +atomic pull --storage ostree docker:openshift/openshift-ansible:latest +``` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a3ae3fd10..1c0fa73ad 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,92 +3,103 @@ Thank you for contributing to OpenShift Ansible. This document explains how the repository is organized, and how to submit contributions. -## Introduction +**Table of Contents** -Before submitting code changes, get familiarized with these documents: +<!-- TOC depthFrom:2 depthTo:4 withLinks:1 updateOnSave:1 orderedList:0 --> -- [Core Concepts](https://github.com/openshift/openshift-ansible/blob/master/docs/core_concepts_guide.adoc) -- [Best Practices Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/best_practices_guide.adoc) -- [Style Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/style_guide.adoc) +- [Introduction](#introduction) +- [Submitting contributions](#submitting-contributions) +- [Running tests and other verification tasks](#running-tests-and-other-verification-tasks) + - [Running only specific tasks](#running-only-specific-tasks) +- [Appendix](#appendix) + - [Tricks](#tricks) + - [Activating a virtualenv managed by tox](#activating-a-virtualenv-managed-by-tox) + - [Limiting the unit tests that are run](#limiting-the-unit-tests-that-are-run) + - [Finding unused Python code](#finding-unused-python-code) -## Repository structure +<!-- /TOC --> -### Ansible +## Introduction -``` -. -├── inventory Contains dynamic inventory scripts, and examples of -│ Ansible inventories. -├── library Contains Python modules used by the playbooks. -├── playbooks Contains Ansible playbooks targeting multiple use cases. -└── roles Contains Ansible roles, units of shared behavior among - playbooks. -``` +Before submitting code changes, get familiarized with these documents: -#### Ansible plugins +- [Core Concepts](docs/core_concepts_guide.adoc) +- [Best Practices Guide](docs/best_practices_guide.adoc) +- [Style Guide](docs/style_guide.adoc) +- [Repository Structure](docs/repo_structure.md) -These are plugins used in playbooks and roles: +Please consider opening an issue or discussing on an existing one if you are +planning to work on something larger, to make sure your time investment is +something that can be merged to the repository. -``` -. 
-├── ansible-profile -├── callback_plugins -├── filter_plugins -└── lookup_plugins -``` +## Submitting contributions -### Scripts +1. [Fork](https://help.github.com/articles/fork-a-repo/) this repository and + [create a work branch in your fork](https://help.github.com/articles/github-flow/). +2. Go through the documents mentioned in the [introduction](#introduction). +3. Make changes and commit. You may want to review your changes and + [run tests](#running-tests-and-other-verification-tasks) before pushing your + branch. +4. [Open a Pull Request](https://help.github.com/articles/creating-a-pull-request/). + Give it a meaningful title explaining the changes you are proposing, and + then add further details in the description. + +One of the repository maintainers will then review the PR and trigger tests, and +possibly start a discussion that goes on until the PR is ready to be merged. +This process is further explained in the +[Pull Request process](docs/pull_requests.md) document. + +If you get no timely feedback from a project contributor / maintainer, sorry for +the delay. You can help us speed up triaging, reviewing and eventually merging +contributions by requesting a review or tagging in a comment +[someone who has worked on the files](https://help.github.com/articles/tracing-changes-in-a-file/) +you're proposing changes to. -``` -. -├── bin [DEPRECATED] Contains the `bin/cluster` script, a -│ wrapper around the Ansible playbooks that ensures proper -│ configuration, and facilitates installing, updating, -│ destroying and configuring OpenShift clusters. -│ Note: this tool is kept in the repository for legacy -│ reasons and will be removed at some point. -└── utils Contains the `atomic-openshift-installer` command, an - interactive CLI utility to install OpenShift across a - set of hosts. -``` +--- -### Documentation +**Note**: during the review process, you may add new commits to address review +comments or change existing commits. However, before getting your PR merged, +please [squash commits](https://help.github.com/articles/about-git-rebase/) to a +minimum set of meaningful commits. -``` -. -└── docs Contains documentation for this repository. -``` +If you've broken your work up into a set of sequential changes and each commit +pass the tests on their own then that's fine. If you've got commits fixing typos +or other problems introduced by previous commits in the same PR, then those +should be squashed before merging. -### Tests +If you are new to Git, these links might help: -``` -. -└── test Contains tests. -``` +- https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History +- http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html -## Building RPMs +--- -See the [RPM build instructions](BUILD.md). +## Running tests and other verification tasks -## Running tests +We use [`tox`](http://readthedocs.org/docs/tox/) to manage virtualenvs where +tests and other verification tasks are run. We use +[`pytest`](https://docs.pytest.org/) as our test runner. -We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run -tests. Alternatively, tests can be run using -[detox](https://pypi.python.org/pypi/detox/) which allows for running tests in -parallel. +Alternatively to `tox`, one can use +[`detox`](https://pypi.python.org/pypi/detox/) for running verification tasks in +parallel. 
Note that while `detox` may be useful in development to make use of +multiple cores, it can be buggy at times and produce flakes, thus we do not use +it in our [CI](docs/continuous_integration.md) jobs. -Note: while `detox` may be useful in development to make use of multiple cores, -it can be buggy at times and produce flakes, thus we do not use it in our CI. +``` +pip install tox +``` +To run all tests and verification tasks: ``` -pip install tox detox +tox ``` --- -Note: before running `tox` or `detox`, ensure that the only virtualenvs within -the repository root are the ones managed by `tox`, those in a `.tox` +**Note**: before running `tox` or `detox`, ensure that the only virtualenvs +within the repository root are the ones managed by `tox`, those in a `.tox` subdirectory. Use this command to list paths that are likely part of a virtualenv not managed @@ -105,45 +116,52 @@ potentially fail. --- -List the test environments available: +### Running only specific tasks + +The [tox configuration](tox.ini) describes environments based on either Python 2 +or Python 3. Each environment is associated with a command that is executed in +the context of a virtualenv, with a specific version of Python, installed +dependencies, environment variables and so on. To list the environments +available: ``` tox -l ``` -Run all of the tests and linters with: +To run the command of a particular environment, e.g., `flake8` on Python 2.7: ``` -tox +tox -e py27-flake8 ``` -Run all of the tests linters in parallel (may flake): +To run the command of a particular environment in a clean virtualenv, e.g., +`pylint` on Python 3.5: ``` -detox +tox -re py35-pylint ``` -### Run only unit tests or some specific linter +The `-r` flag recreates existing environments, useful to force dependencies to +be reinstalled. -Run a particular test environment (`flake8` on Python 2.7 in this case): +## Appendix -``` -tox -e py27-flake8 -``` +### Tricks -Run a particular test environment in a clean virtualenv (`pylint` on Python 3.5 -in this case): +Here are some useful tips that might improve your workflow while working on this repository. -``` -tox -re py35-pylint -``` +#### Git Hooks -### Tricks +Git hooks are included in this repository to aid in development. Check +out the README in the +[hack/hooks](http://github.com/openshift/openshift-ansible/blob/master/hack/hooks/README.md) +directory for more information. #### Activating a virtualenv managed by tox -If you want to enter a virtualenv created by tox to do additional -testing/debugging (py27-flake8 env in this case): +If you want to enter a virtualenv created by tox to do additional debugging, you +can activate it just like any other virtualenv (py27-flake8 environment in this +example): ``` source .tox/py27-flake8/bin/activate @@ -182,32 +200,7 @@ $ tox -e py27-unit -- roles/lib_openshift/src/test/unit/test_oc_project.py -k te Among other things, this can be used for instance to see the coverage levels of individual modules as we work on improving tests. -## Submitting contributions - -1. Go through the guides from the [introduction](#Introduction). -2. Fork this repository, and create a work branch in your fork. -3. Make changes and commit. You may want to review your changes and run tests - before pushing your branch. -4. Open a Pull Request. - -One of the repository maintainers will then review the PR and submit it for -testing. - -The `default` test job is publicly accessible at -https://ci.openshift.redhat.com/jenkins/job/openshift-ansible/. 
The other jobs -are run on a different Jenkins host that is not publicly accessible, however the -test results are posted to S3 buckets when complete. - -The test output of each job is also posted to the Pull Request as comments. - -A trend of the time taken by merge jobs is available at -https://ci.openshift.redhat.com/jenkins/job/merge_pull_request_openshift_ansible/buildTimeTrend. - ---- - -## Appendix - -### Finding unused Python code +#### Finding unused Python code If you are contributing with Python code, you can use the tool [`vulture`](https://pypi.python.org/pypi/vulture) to verify that you are not @@ -54,7 +54,7 @@ you are not running a stable release. *** Requirements: - - Ansible >= 2.2.0 + - Ansible >= 2.2.2.0 - Jinja >= 2.7 - pyOpenSSL - python-lxml @@ -83,7 +83,10 @@ See [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for information on ho See the [hooks documentation](HOOKS.md). - ## Contributing See the [contribution guide](CONTRIBUTING.md). + +## Building openshift-ansible RPMs and container images + +See the [build instructions](BUILD.md). diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md index 2499e01d4..b78073100 100644 --- a/README_CONTAINER_IMAGE.md +++ b/README_CONTAINER_IMAGE.md @@ -1,43 +1,72 @@ # Containerized openshift-ansible to run playbooks -The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks. +The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks. See [BUILD.md](BUILD.md) for image build instructions. -**Note**: at this time there are known issues that prevent to run this image for installation/upgrade purposes (i.e. run one of the config/upgrade playbooks) from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do) this would kill the container itself during its operation. - -## Build - -To build a container image of `openshift-ansible`: - -1. Using standalone **Docker**: +The image is designed to **run as a non-root user**. The container's UID is mapped to the username `default` at runtime. Therefore, the container's environment reflects that user's settings, and the configuration should match that. For example `$HOME` is `/opt/app-root/src`, so ssh keys are expected to be under `/opt/app-root/src/.ssh`. If you ran a container as `root` you would have to adjust the container's configuration accordingly, e.g. by placing ssh keys under `/root/.ssh` instead. Nevertheless, the expectation is that containers will be run as non-root; for example, this container image can be run inside OpenShift under the default `restricted` [security context constraint](https://docs.openshift.org/latest/architecture/additional_concepts/authorization.html#security-context-constraints). - cd openshift-ansible - docker build -t openshift/openshift-ansible . - -1. Using an **OpenShift** build: - - oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible - oc describe imagestream openshift-ansible +**Note**: at this time there are known issues that prevent to run this image for installation/upgrade purposes (i.e. 
run one of the config/upgrade playbooks) from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do) this would kill the container itself during its operation. ## Usage The `playbook2image` base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation. -At the very least, when running a container using an image built this way you must specify: +At the very least, when running a container you must specify: -1. An **inventory** file. This can be mounted inside the container as a volume and specified with the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it. -1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh` -1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collecting and show facts about your OpenShift environment. +1. An **inventory**. This can be a location inside the container (possibly mounted as a volume) with a path referenced via the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it, or `DYNAMIC_SCRIPT_URL` to download a script that provides a dynamic inventory. -Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry). The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container and SELinux label in the key file via `:Z` so they can be accessed) and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image: +1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh` under normal usage (i.e. when running the container as non-root). + +1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collect and show facts about your OpenShift environment. + +Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry): docker run -u `id -u` \ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \ -v /etc/ansible/hosts:/tmp/inventory \ -e INVENTORY_FILE=/tmp/inventory \ - -e OPTS="-v" \ -e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \ + -e OPTS="-v" -t \ openshift/openshift-ansible -Further usage examples are available in the [examples directory](examples/). +You might want to adjust some of the options in the example to match your environment and/or preferences. 
For example: you might want to create a separate directory on the host where you'll copy the ssh key and inventory files prior to invocation to avoid unwanted SELinux re-labeling of the original files or paths (see below). + +Here is a detailed explanation of the options used in the command above: + +* ``-u `id -u` `` makes the container run with the same UID as the current user, which is required for permissions so that the ssh key can be read inside the container (ssh private keys are expected to be readable only by their owner). Usually you would invoke `docker run` as a non-root user that has privileges to run containers and leave that option as is. + +* `-v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z` mounts your ssh key (`$HOME/.ssh/id_rsa`) under the `default` user's `$HOME/.ssh` in the container (as explained above, `/opt/app-root/src` is the `$HOME` of the `default` user in the container). If you mount the ssh key into a non-standard location you can add an environment variable with `-e ANSIBLE_PRIVATE_KEY_FILE=/the/mount/point` or set `ansible_ssh_private_key_file=/the/mount/point` as a variable in the inventory to point Ansible at it. + + Note that the ssh key is mounted with the `:Z` flag: this is also required so that the container can read the ssh key from its restricted SELinux context; this means that *your original ssh key file will be re-labeled* to something like `system_u:object_r:container_file_t:s0:c113,c247`. For more details about `:Z` please check the `docker-run(1)` man page. Please keep this in mind when providing these volume mount specifications because this could have unexpected consequences: for example, if you mount (and therefore re-label) your whole `$HOME/.ssh` directory you will block `sshd` from accessing your keys. This is a reason why you might want to work on a separate copy of the ssh key, so that the original file's labels remain untouched. + +* `-v /etc/ansible/hosts:/tmp/inventory` and `-e INVENTORY_FILE=/tmp/inventory` mount the Ansible inventory file into the container as `/tmp/inventory` and set the corresponding environment variable to point at it respectively. The example uses `/etc/ansible/hosts` as the inventory file as this is a default location, but your inventory is likely to be elsewhere so please adjust as needed. Note that depending on the file you point to you might have to handle SELinux labels in a similar way as with the ssh keys, e.g. by adding a `:z` flag to the volume mount, so again you might prefer to copy the inventory to a dedicated location first. + +* `-e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml` specifies the playbook to run as a relative path from the top level directory of openshift-ansible. + +* `-e OPTS="-v"` and `-t` make the output look nicer: the `default.yaml` playbook does not generate results and runs quietly unless we add the `-v` option to the `ansible-playbook` invocation, and a TTY is allocated via `-t` so that Ansible adds color to the output. + +Further usage examples are available in the [examples directory](examples/) with samples of how to use the image from within OpenShift. Additional usage information for images built from `playbook2image` like this one can be found in the [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples). + +## Running openshift-ansible as a System Container + +Building the System Container: See the [BUILD.md](BUILD.md). + +Copy ssh public key of the host machine to master and nodes machines in the cluster. 
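As an illustrative sketch (the hostnames below are placeholders and `ssh-copy-id` is assumed to be available on the host), the public key can be distributed with:

```sh
# Distribute the host's public key so Ansible can reach every machine in the cluster.
# Replace the hostnames with the actual addresses of your master and node machines.
for host in master node1 node2; do
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@$host
done
```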
+ +If the inventory file needs additional files then it can use the path `/var/lib/openshift-installer` in the container as it is bind mounted from the host (controllable with `VAR_LIB_OPENSHIFT_INSTALLER`). + +Run the ansible system container: + +```sh +atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/openshift-ansible +systemctl start openshift-ansible +``` + +The `INVENTORY_FILE` variable says to the installer what inventory file on the host will be bind mounted inside the container. In the example above, a file called `inventory.origin` in the current directory is used as the inventory file for the installer. + +And to finally cleanup the container: + +``` +atomic uninstall openshift-ansible +``` diff --git a/bin/cluster b/bin/cluster index b9b2ab15f..f77eb36ad 100755 --- a/bin/cluster +++ b/bin/cluster @@ -1,5 +1,4 @@ #!/usr/bin/env python2 -# vim: expandtab:tabstop=4:shiftwidth=4 import argparse import ConfigParser diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py index f31445381..20bdd9056 100644 --- a/callback_plugins/aa_version_requirement.py +++ b/callback_plugins/aa_version_requirement.py @@ -7,7 +7,6 @@ The plugin is named with leading `aa_` to ensure this plugin is loaded first (alphanumerically) by Ansible. """ import sys -from subprocess import check_output from ansible import __version__ if __version__ < '2.0': @@ -30,13 +29,8 @@ else: # Set to minimum required Ansible version -REQUIRED_VERSION = '2.2.0.0' -DESCRIPTION = "Supported versions: %s or newer (except 2.2.1.0)" % REQUIRED_VERSION -FAIL_ON_2_2_1_0 = "There are known issues with Ansible version 2.2.1.0 which " \ - "are impacting OpenShift-Ansible. Please use Ansible " \ - "version 2.2.0.0 or a version greater than 2.2.1.0. " \ - "See this issue for more details: " \ - "https://github.com/openshift/openshift-ansible/issues/3111" +REQUIRED_VERSION = '2.2.2.0' +DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION def version_requirement(version): @@ -64,13 +58,3 @@ class CallbackModule(CallbackBase): 'FATAL: Current Ansible version (%s) is not supported. %s' % (__version__, DESCRIPTION), color='red') sys.exit(1) - - if __version__ == '2.2.1.0': - rpm_ver = str(check_output(["rpm", "-qa", "ansible"])) - patched_ansible = '2.2.1.0-2' - - if patched_ansible not in rpm_ver: - display( - 'FATAL: Current Ansible version (%s) is not supported. %s' - % (__version__, FAIL_ON_2_2_1_0), color='red') - sys.exit(1) diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc index 7f3d85d40..e66c5addb 100644 --- a/docs/best_practices_guide.adoc +++ b/docs/best_practices_guide.adoc @@ -11,44 +11,9 @@ All new pull requests created against this repository MUST comply with this guid This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119]. -== Pull Requests - - - -[[All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged]] -[cols="2v,v"] -|=== -| <<All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged, Rule>> -| All pull requests MUST pass the build bot *before* they are merged. -|=== - -The purpose of this rule is to avoid cases where the build bot will fail pull requests for code modified in a previous pull request. - -The tooling is flexible enough that exceptions can be made so that the tool the build bot is running will ignore certain areas or certain checks, but the build bot itself must pass for the pull request to be merged. 
- - == Python -=== Python Source Files - -''' -[[Python-source-files-MUST-contain-the-following-vim-mode-line]] -[cols="2v,v"] -|=== -| <<Python-source-files-MUST-contain-the-following-vim-mode-line, Rule>> -| Python source files MUST contain the following vim mode line. -|=== - -[source] ----- -# vim: expandtab:tabstop=4:shiftwidth=4 ----- - -Since most developers contributing to this repository use vim, this rule helps to promote consistency. - -If mode lines for other editors are needed, please open a GitHub issue. - === Method Signatures ''' @@ -509,12 +474,12 @@ The Ansible `package` module calls the associated package manager for the underl # tasks.yml - name: Install etcd (for etcdctl) yum: name=etcd state=latest - when: "ansible_pkg_mgr == yum" + when: ansible_pkg_mgr == yum register: install_result - name: Install etcd (for etcdctl) dnf: name=etcd state=latest - when: "ansible_pkg_mgr == dnf" + when: ansible_pkg_mgr == dnf register: install_result ---- diff --git a/docs/pull_requests.md b/docs/pull_requests.md new file mode 100644 index 000000000..fcc3e275c --- /dev/null +++ b/docs/pull_requests.md @@ -0,0 +1,95 @@ +# Pull Request process + +Pull Requests in the `openshift-ansible` project follow a +[Continuous](https://en.wikipedia.org/wiki/Continuous_integration) +[Integration](https://martinfowler.com/articles/continuousIntegration.html) +process that is similar to the process observed in other repositories such as +[`origin`](https://github.com/openshift/origin). + +Whenever a +[Pull Request is opened](../CONTRIBUTING.md#submitting-contributions), some +automated test jobs must be successfully run before the PR can be merged. + +Some of these jobs are automatically triggered, e.g., Travis and Coveralls. +Other jobs need to be manually triggered by a member of the +[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors). + +## Triggering tests + +We have two different Jenkins infrastructures, and, while that holds true, there +are two commands that trigger a different set of test jobs. We are working on +simplifying the workflow towards a single infrastructure in the future. + +- **Test jobs on the older infrastructure** + + Members of the [OpenShift organization](https://github.com/orgs/openshift/people) + can trigger the set of test jobs in the older infrastructure by writing a + comment with the exact text `aos-ci-test` and nothing else. + + The Jenkins host is not publicly accessible. Test results are posted to S3 + buckets when complete, and links are available both at the bottom of the Pull + Request page and as comments posted by + [@openshift-bot](https://github.com/openshift-bot). + +- **Test jobs on the newer infrastructure** + + Members of the + [Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors) + can trigger the set of test jobs in the newer infrastructure by writing a + comment containing `[test]` anywhere in the comment body. + + The [Jenkins host](https://ci.openshift.redhat.com/jenkins/job/test_pull_request_openshift_ansible/) + is publicly accessible. Like for the older infrastructure, the result of each + job is also posted to the Pull Request as comments and summarized at the + bottom of the Pull Request page. + +### Fedora tests + +There are a set of tests that run on Fedora infrastructure. They are started +automatically with every pull request. 
+ +They are implemented using the [`redhat-ci` framework](https://github.com/jlebon/redhat-ci). + +To re-run tests, write a comment containing `bot, retest this please`. + +## Triggering merge + +After a PR is properly reviewed and a set of +[required jobs](https://github.com/openshift/aos-cd-jobs/blob/master/sjb/test_status_config.yml) +reported successfully, it can be tagged for merge by a member of the +[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors) +by writing a comment containing `[merge]` anywhere in the comment body. + +Tagging a Pull Request for merge puts it in an automated merge queue. The +[@openshift-bot](https://github.com/openshift-bot) monitors the queue and merges +PRs that pass all of the required tests. + +### Manual merges + +The normal process described above should be followed: `aos-ci-test` and +`[test]` / `[merge]`. + +In exceptional cases, such as when known problems with the merge queue prevent +PRs from being merged, a PR may be manually merged if _all_ of these conditions +are true: + +- [ ] Travis job must have passed (as enforced by GitHub) +- [ ] Must have passed `aos-ci-test` (as enforced by GitHub) +- [ ] Must have a positive review (as enforced by GitHub) +- [ ] Must have failed the `[merge]` queue with a reported flake at least twice +- [ ] Must have [issues labeled kind/test-flake](https://github.com/openshift/origin/issues?q=is%3Aopen+is%3Aissue+label%3Akind%2Ftest-flake) in [Origin](https://github.com/openshift/origin) linked in comments for the failures +- [ ] Content must not have changed since all of the above conditions have been met (no rebases, no new commits) + +This exception is temporary and should be completely removed in the future once +the merge queue has become more stable. + +Only members of the +[Team OpenShift Ansible Committers](https://github.com/orgs/openshift/teams/team-openshift-ansible-committers) +can perform manual merges. + +## Useful links + +- Repository containing Jenkins job definitions: https://github.com/openshift/aos-cd-jobs +- List of required successful jobs before merge: https://github.com/openshift/aos-cd-jobs/blob/master/sjb/test_status_config.yml +- Source code of the bot responsible for testing and merging PRs: https://github.com/openshift/test-pull-requests/ +- Trend of the time taken by merge jobs: https://ci.openshift.redhat.com/jenkins/job/merge_pull_request_openshift_ansible/buildTimeTrend diff --git a/docs/repo_structure.md b/docs/repo_structure.md new file mode 100644 index 000000000..693837fba --- /dev/null +++ b/docs/repo_structure.md @@ -0,0 +1,54 @@ +# Repository structure + +### Ansible + +``` +. +├── inventory Contains dynamic inventory scripts, and examples of +│ Ansible inventories. +├── library Contains Python modules used by the playbooks. +├── playbooks Contains Ansible playbooks targeting multiple use cases. +└── roles Contains Ansible roles, units of shared behavior among + playbooks. +``` + +#### Ansible plugins + +These are plugins used in playbooks and roles: + +``` +. +├── ansible-profile +├── callback_plugins +├── filter_plugins +└── lookup_plugins +``` + +### Scripts + +``` +. +├── bin [DEPRECATED] Contains the `bin/cluster` script, a +│ wrapper around the Ansible playbooks that ensures proper +│ configuration, and facilitates installing, updating, +│ destroying and configuring OpenShift clusters. +│ Note: this tool is kept in the repository for legacy +│ reasons and will be removed at some point. 
+└── utils Contains the `atomic-openshift-installer` command, an + interactive CLI utility to install OpenShift across a + set of hosts. +``` + +### Documentation + +``` +. +└── docs Contains documentation for this repository. +``` + +### Tests + +``` +. +└── test Contains tests. +``` diff --git a/examples/README.md b/examples/README.md index 0e412244d..d54752fb9 100644 --- a/examples/README.md +++ b/examples/README.md @@ -69,19 +69,19 @@ To run these examples we prepare the inventory and ssh keys as in the other exam Additionally we allocate a `PersistentVolumeClaim` to store the reports: - oc create -f - <<PVC - --- - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: certcheck-reports - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - PVC + oc create -f - <<PVC + --- + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: certcheck-reports + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + PVC With that we can run the `Job` once: diff --git a/examples/certificate-check-upload.yaml b/examples/certificate-check-upload.yaml index b10a0b614..8b560447f 100644 --- a/examples/certificate-check-upload.yaml +++ b/examples/certificate-check-upload.yaml @@ -20,28 +20,34 @@ kind: Job metadata: name: certificate-check spec: - containers: - - name: openshift-ansible - image: openshift/openshift-ansible - env: - - name: PLAYBOOK_FILE - value: playbooks/certificate_expiry/easy-mode-upload.yaml - - name: INVENTORY_FILE - value: /tmp/inventory/hosts # from configmap vol below - - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below - value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey - - name: CERT_EXPIRY_WARN_DAYS - value: "45" # must be a string, don't forget the quotes - volumeMounts: - - name: sshkey - mountPath: /opt/app-root/src/.ssh/id_rsa - - name: inventory - mountPath: /tmp/inventory - volumes: - - name: sshkey - secret: - secretName: sshkey - - name: inventory - configMap: - name: inventory - restartPolicy: Never + parallelism: 1 + completions: 1 + template: + metadata: + name: certificate-check + spec: + containers: + - name: openshift-ansible + image: openshift/openshift-ansible + env: + - name: PLAYBOOK_FILE + value: playbooks/certificate_expiry/easy-mode-upload.yaml + - name: INVENTORY_FILE + value: /tmp/inventory/hosts # from configmap vol below + - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below + value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey + - name: CERT_EXPIRY_WARN_DAYS + value: "45" # must be a string, don't forget the quotes + volumeMounts: + - name: sshkey + mountPath: /opt/app-root/src/.ssh/id_rsa + - name: inventory + mountPath: /tmp/inventory + volumes: + - name: sshkey + secret: + secretName: sshkey + - name: inventory + configMap: + name: inventory + restartPolicy: Never diff --git a/examples/certificate-check-volume.yaml b/examples/certificate-check-volume.yaml index c19dc1f88..f6613bcd8 100644 --- a/examples/certificate-check-volume.yaml +++ b/examples/certificate-check-volume.yaml @@ -22,33 +22,39 @@ kind: Job metadata: name: certificate-check spec: - containers: - - name: openshift-ansible - image: openshift/openshift-ansible - env: - - name: PLAYBOOK_FILE - value: playbooks/certificate_expiry/html_and_json_timestamp.yaml - - name: INVENTORY_FILE - value: /tmp/inventory/hosts # from configmap vol below - - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below - value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey - - name: CERT_EXPIRY_WARN_DAYS - value: "45" # must be 
a string, don't forget the quotes - volumeMounts: - - name: sshkey - mountPath: /opt/app-root/src/.ssh/id_rsa - - name: inventory - mountPath: /tmp/inventory - - name: reports - mountPath: /var/lib/certcheck - volumes: - - name: sshkey - secret: - secretName: sshkey - - name: inventory - configMap: - name: inventory - - name: reports - persistentVolumeClaim: - claimName: certcheck-reports - restartPolicy: Never + parallelism: 1 + completions: 1 + template: + metadata: + name: certificate-check + spec: + containers: + - name: openshift-ansible + image: openshift/openshift-ansible + env: + - name: PLAYBOOK_FILE + value: playbooks/certificate_expiry/html_and_json_timestamp.yaml + - name: INVENTORY_FILE + value: /tmp/inventory/hosts # from configmap vol below + - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below + value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey + - name: CERT_EXPIRY_WARN_DAYS + value: "45" # must be a string, don't forget the quotes + volumeMounts: + - name: sshkey + mountPath: /opt/app-root/src/.ssh/id_rsa + - name: inventory + mountPath: /tmp/inventory + - name: reports + mountPath: /var/lib/certcheck + volumes: + - name: sshkey + secret: + secretName: sshkey + - name: inventory + configMap: + name: inventory + - name: reports + persistentVolumeClaim: + claimName: certcheck-reports + restartPolicy: Never diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 10c8600ba..8b279981d 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 # pylint: disable=too-many-lines """ Custom filters for use in openshift-ansible @@ -11,6 +10,7 @@ import pdb import random import re +from base64 import b64encode from collections import Mapping # pylint no-name-in-module and import-error disabled here because pylint # fails to properly detect the packages when installed in a virtualenv @@ -672,8 +672,7 @@ def oo_generate_secret(num_bytes): if not isinstance(num_bytes, int): raise errors.AnsibleFilterError("|failed expects num_bytes is int") - secret = os.urandom(num_bytes) - return secret.encode('base-64').strip() + return b64encode(os.urandom(num_bytes)).decode('utf-8') def to_padded_yaml(data, level=0, indent=2, **kw): diff --git a/filter_plugins/openshift_node.py b/filter_plugins/openshift_node.py index 8c7302052..cad95ea6d 100644 --- a/filter_plugins/openshift_node.py +++ b/filter_plugins/openshift_node.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Custom filters for use in openshift-node ''' diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py index 1403e9dcc..809e82488 100644 --- a/filter_plugins/openshift_version.py +++ b/filter_plugins/openshift_version.py @@ -1,7 +1,5 @@ #!/usr/bin/python - # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 """ Custom version comparison filters for use in openshift-ansible """ diff --git a/hack/hooks/README.md b/hack/hooks/README.md new file mode 100644 index 000000000..ef870540a --- /dev/null +++ b/hack/hooks/README.md @@ -0,0 +1,37 @@ +# OpenShift-Ansible Git Hooks + +## Introduction + +This `hack` sub-directory holds +[git commit hooks](https://www.atlassian.com/git/tutorials/git-hooks#conceptual-overview) +you may use when working on openshift-ansible contributions. 
See the +README in each sub-directory for an overview of what each hook does +and if the hook has any specific usage or setup instructions. + +## Usage + +Basic git hook usage is simple: + +1) Copy (or symbolic link) the hook to the `$REPO_ROOT/.git/hooks/` directory +2) Make the hook executable (`chmod +x $PATH_TO_HOOK`) + +## Multiple Hooks of the Same Type + +If you want to install multiple hooks of the same type, for example: +multiple `pre-commit` hooks, you will need some kind of *hook +dispatcher*. For an example of an easy to use hook dispatcher check +out this gist by carlos-jenkins: + +* [multihooks.py](https://gist.github.com/carlos-jenkins/89da9dcf9e0d528ac978311938aade43) + +## Contributing Hooks + +If you want to contribute a new hook there are only a few criteria +that must be met: + +* The hook **MUST** include a README describing the purpose of the hook +* The README **MUST** describe special setup instructions if they are required +* The hook **MUST** be in a sub-directory of this directory +* The hook file **MUST** be named following the standard git hook + naming pattern (i.e., pre-commit hooks **MUST** be called + `pre-commit`) diff --git a/hack/hooks/verify_generated_modules/README.md b/hack/hooks/verify_generated_modules/README.md new file mode 100644 index 000000000..093fcf76a --- /dev/null +++ b/hack/hooks/verify_generated_modules/README.md @@ -0,0 +1,19 @@ +# Verify Generated Modules + +Pre-commit hook for verifying that generated library modules match +their EXPECTED content. Library modules are generated from fragments +under the `roles/lib_(openshift|utils)/src/` directories. + +If the attempted commit modified files under the +`roles/lib_(openshift|utils)/` directories this script will run the +`generate.py --verify` command. + +This script will **NOT RUN** if module source fragments are modified +but *not part of the commit*. I.e., you can still make commits if you +modified module fragments AND other files but are *not comitting the +the module fragments*. + +# Setup Instructions + +Standard installation procedure. Copy the hook to the `.git/hooks/` +directory and ensure it is executable. diff --git a/hack/hooks/verify_generated_modules/pre-commit b/hack/hooks/verify_generated_modules/pre-commit new file mode 100755 index 000000000..8a319fd7e --- /dev/null +++ b/hack/hooks/verify_generated_modules/pre-commit @@ -0,0 +1,55 @@ +#!/bin/sh + +###################################################################### +# Pre-commit hook for verifying that generated library modules match +# their EXPECTED content. Library modules are generated from fragments +# under the 'roles/lib_(openshift|utils)/src/' directories. +# +# If the attempted commit modified files under the +# 'roles/lib_(openshift|utils)/' directories this script will run the +# 'generate.py --verify' command. +# +# This script will NOT RUN if module source fragments are modified but +# not part of the commit. I.e., you can still make commits if you +# modified module fragments AND other files but are not comitting the +# the module fragments. + +# Did the commit modify any source module files? +CHANGES=`git diff-index --stat --cached HEAD | grep -E '^ roles/lib_(openshift|utils)/src/(class|doc|ansible|lib)/'` +RET_CODE=$? +ABORT=0 + +if [ "${RET_CODE}" -eq "0" ]; then + # Modifications detected. Run the verification scripts. + + # Which was it? + if $(echo $CHANGES | grep -q 'roles/lib_openshift/'); then + echo "Validating lib_openshift..." 
+ ./roles/lib_openshift/src/generate.py --verify + if [ "${?}" -ne "0" ]; then + ABORT=1 + fi + fi + + if $(echo $CHANGES | grep -q 'roles/lib_utils/'); then + echo "Validating lib_utils..." + ./roles/lib_utils/src/generate.py --verify + if [ "${?}" -ne "0" ]; then + ABORT=1 + fi + fi + + if [ "${ABORT}" -eq "1" ]; then + cat <<EOF + +ERROR: Module verification failed. Generated files do not match fragments. + +Choices to continue: + 1) Run './roles/lib_(openshift|utils)/src/generate.py' from the root of + the repo to regenerate the files + 2) Skip verification with '--no-verify' option to 'git commit' +EOF + fi +fi + +exit $ABORT diff --git a/Dockerfile b/images/installer/Dockerfile index eecf3630b..1df887f32 100644 --- a/Dockerfile +++ b/images/installer/Dockerfile @@ -16,6 +16,12 @@ LABEL name="openshift-ansible" \ USER root +# Create a symlink to /opt/app-root/src so that files under /usr/share/ansible are accessible. +# This is required since the system-container uses by default the playbook under +# /usr/share/ansible/openshift-ansible. With this change we won't need to keep two different +# configurations for the two images. +RUN mkdir -p /usr/share/ansible/ && ln -s /opt/app-root/src /usr/share/ansible/openshift-ansible + RUN INSTALL_PKGS="skopeo" && \ yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ rpm -V $INSTALL_PKGS && \ @@ -39,4 +45,7 @@ ADD . /tmp/src # as per the INSTALL_OC environment setting above RUN /usr/libexec/s2i/assemble +# Add files for running as a system container +COPY system-container/root / + CMD [ "/usr/libexec/s2i/run" ] diff --git a/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7 index 0d5a6038a..00841e660 100644 --- a/Dockerfile.rhel7 +++ b/images/installer/Dockerfile.rhel7 @@ -20,9 +20,10 @@ LABEL name="openshift3/openshift-ansible" \ # because all content and dependencies (like 'oc') is already # installed via yum. USER root -RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients" && \ +RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto" && \ yum repolist > /dev/null && \ yum-config-manager --enable rhel-7-server-ose-3.4-rpms && \ + yum-config-manager --enable rhel-7-server-rh-common-rpms && \ yum install -y $INSTALL_PKGS && \ yum clean all @@ -38,4 +39,7 @@ ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \ WORK_DIR=/usr/share/ansible/openshift-ansible \ OPTS="-v" +# Add files for running as a system container +COPY system-container/root / + CMD [ "/usr/libexec/s2i/run" ] diff --git a/images/installer/system-container/README.md b/images/installer/system-container/README.md new file mode 100644 index 000000000..dc95307e5 --- /dev/null +++ b/images/installer/system-container/README.md @@ -0,0 +1,13 @@ +# System container installer + +These files are needed to run the installer using an [Atomic System container](http://www.projectatomic.io/blog/2016/09/intro-to-system-containers/). + +* config.json.template - Template of the configuration file used for running containers. + +* manifest.json - Used to define various settings for the system container, such as the default values to use for the installation. + +* run-system-container.sh - Entrypoint to the container. + +* service.template - Template file for the systemd service. + +* tmpfiles.template - Template file for systemd-tmpfiles. 
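The variables referenced by these templates (for example `INVENTORY_FILE`, `PLAYBOOK_FILE` and `OPTS`) take the defaults declared in `manifest.json` and can be overridden at install time with `atomic install --set`; a hypothetical invocation, with example paths only, might look like:

```sh
# Override the inventory, playbook and extra ansible-playbook options that
# manifest.json declares as defaults when installing the system container.
atomic install --system \
    --set INVENTORY_FILE=/root/inventory.origin \
    --set PLAYBOOK_FILE=/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml \
    --set OPTS="-v" \
    openshift/openshift-ansible
systemctl start openshift-ansible
```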
diff --git a/images/installer/system-container/root/exports/config.json.template b/images/installer/system-container/root/exports/config.json.template new file mode 100644 index 000000000..383e3696e --- /dev/null +++ b/images/installer/system-container/root/exports/config.json.template @@ -0,0 +1,223 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "os": "linux", + "arch": "amd64" + }, + "process": { + "terminal": false, + "consoleSize": { + "height": 0, + "width": 0 + }, + "user": { + "uid": 0, + "gid": 0 + }, + "args": [ + "/usr/local/bin/run-system-container.sh" + ], + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "TERM=xterm", + "OPTS=$OPTS", + "PLAYBOOK_FILE=$PLAYBOOK_FILE" + ], + "cwd": "/opt/app-root/src/", + "rlimits": [ + { + "type": "RLIMIT_NOFILE", + "hard": 1024, + "soft": 1024 + } + ], + "noNewPrivileges": true + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "mounts": [ + { + "destination": "/proc", + "type": "proc", + "source": "proc" + }, + { + "destination": "/dev", + "type": "tmpfs", + "source": "tmpfs", + "options": [ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ] + }, + { + "destination": "/dev/pts", + "type": "devpts", + "source": "devpts", + "options": [ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ] + }, + { + "destination": "/dev/shm", + "type": "tmpfs", + "source": "shm", + "options": [ + "nosuid", + "noexec", + "nodev", + "mode=1777", + "size=65536k" + ] + }, + { + "destination": "/dev/mqueue", + "type": "mqueue", + "source": "mqueue", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/sys", + "type": "sysfs", + "source": "sysfs", + "options": [ + "nosuid", + "noexec", + "nodev", + "ro" + ] + }, + { + "type": "bind", + "source": "$SSH_ROOT", + "destination": "/opt/app-root/src/.ssh", + "options": [ + "bind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "$SSH_ROOT", + "destination": "/root/.ssh", + "options": [ + "bind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "$VAR_LIB_OPENSHIFT_INSTALLER", + "destination": "/var/lib/openshift-installer", + "options": [ + "bind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "$VAR_LOG_OPENSHIFT_LOG", + "destination": "/var/log/ansible.log", + "options": [ + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/root/.ansible", + "type": "tmpfs", + "source": "tmpfs", + "options": [ + "nosuid", + "strictatime", + "mode=755" + ] + }, + { + "destination": "/tmp", + "type": "tmpfs", + "source": "tmpfs", + "options": [ + "nosuid", + "strictatime", + "mode=755" + ] + }, + { + "type": "bind", + "source": "$INVENTORY_FILE", + "destination": "/etc/ansible/hosts", + "options": [ + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/sys/fs/cgroup", + "type": "cgroup", + "source": "cgroup", + "options": [ + "nosuid", + "noexec", + "nodev", + "relatime", + "ro" + ] + } + ], + "hooks": { + + }, + "linux": { + "resources": { + "devices": [ + { + "allow": false, + "access": "rwm" + } + ] + }, + "namespaces": [ + { + "type": "pid" + }, + { + "type": "mount" + } + ], + "maskedPaths": [ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/sys/firmware" + ], + "readonlyPaths": [ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ] + } +} diff --git a/images/installer/system-container/root/exports/manifest.json 
b/images/installer/system-container/root/exports/manifest.json new file mode 100644 index 000000000..1db845965 --- /dev/null +++ b/images/installer/system-container/root/exports/manifest.json @@ -0,0 +1,11 @@ +{ + "version": "1.0", + "defaultValues": { + "OPTS": "", + "VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer", + "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log", + "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml", + "SSH_ROOT": "/root/.ssh", + "INVENTORY_FILE": "/dev/null" + } +} diff --git a/images/installer/system-container/root/exports/service.template b/images/installer/system-container/root/exports/service.template new file mode 100644 index 000000000..bf5316af6 --- /dev/null +++ b/images/installer/system-container/root/exports/service.template @@ -0,0 +1,6 @@ +[Service] +ExecStart=$EXEC_START +ExecStop=-$EXEC_STOP +Restart=no +WorkingDirectory=$DESTDIR +Type=oneshot diff --git a/images/installer/system-container/root/exports/tmpfiles.template b/images/installer/system-container/root/exports/tmpfiles.template new file mode 100644 index 000000000..b1f6caf47 --- /dev/null +++ b/images/installer/system-container/root/exports/tmpfiles.template @@ -0,0 +1,2 @@ +d $VAR_LIB_OPENSHIFT_INSTALLER - - - - - +f $VAR_LOG_OPENSHIFT_LOG - - - - - diff --git a/images/installer/system-container/root/usr/local/bin/run-system-container.sh b/images/installer/system-container/root/usr/local/bin/run-system-container.sh new file mode 100755 index 000000000..9ce7c7328 --- /dev/null +++ b/images/installer/system-container/root/usr/local/bin/run-system-container.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +export ANSIBLE_LOG_PATH=/var/log/ansible.log +exec ansible-playbook -i /etc/ansible/hosts ${OPTS} ${PLAYBOOK_FILE} diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.native-glusterfs.example new file mode 100644 index 000000000..2dbb57d40 --- /dev/null +++ b/inventory/byo/hosts.byo.native-glusterfs.example @@ -0,0 +1,51 @@ +# This is an example of a bring your own (byo) host inventory for a cluster +# with natively hosted, containerized GlusterFS storage. +# +# This inventory may be used with the byo/config.yml playbook to deploy a new +# cluster with GlusterFS storage, which will use that storage to create a +# volume that will provide backend storage for a hosted Docker registry. +# +# This inventory may also be used with byo/openshift-glusterfs/config.yml to +# deploy GlusterFS storage on an existing cluster. With this playbook, the +# registry backend volume will be created but the administrator must then +# either deploy a hosted registry or change an existing hosted registry to use +# that volume. +# +# There are additional configuration parameters that can be specified to +# control the deployment and state of a GlusterFS cluster. Please see the +# documentation in playbooks/byo/openshift-glusterfs/README.md and +# roles/openshift_storage_glusterfs/README.md for additional details. + +[OSEv3:children] +masters +nodes +# Specify there will be GlusterFS nodes +glusterfs + +[OSEv3:vars] +ansible_ssh_user=root +deployment_type=origin +# Specify that we want to use GlusterFS storage for a hosted registry +openshift_hosted_registry_storage_kind=glusterfs + +[masters] +master node=True storage=True master=True + +[nodes] +master node=True storage=True master=True openshift_schedulable=False +# A hosted registry, by default, will only be deployed on nodes labeled +# "region=infra". 
+node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True + +# Specify the glusterfs group, which contains the nodes that will host +# GlusterFS storage pods. At a minimum, each node must have a +# "glusterfs_devices" variable defined. This variable is a list of block +# devices the node will have access to that is intended solely for use as +# GlusterFS storage. These block devices must be bare (e.g. have no data, not +# be marked as LVM PVs), and will be formatted. +[glusterfs] +node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index f70971537..6ec8b9317 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -78,6 +78,18 @@ openshift_release=v1.5 #openshift_docker_blocked_registries=registry.hacker.com # Disable pushing to dockerhub #openshift_docker_disable_push_dockerhub=True +# Use Docker inside a System Container. Note that this is a tech preview and should +# not be used to upgrade! +# The following options for docker are ignored: +# - docker_version +# - docker_upgrade +# The following options must not be used +# - openshift_docker_options +#openshift_docker_use_system_container=False +# Force the registry to use for the system container. By default the registry +# will be built off of the deployment type and ansible_distribution. Only +# use this option if you are sure you know what you are doing! +#openshift_docker_systemcontainer_image_registry_override="registry.example.com" # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" @@ -426,9 +438,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 #openshift_hosted_registry_storage_volume_size=10Gi # -# Native GlusterFS Registry Storage -#openshift_hosted_registry_storage_kind=glusterfs -# # AWS S3 # S3 bucket must already exist. #openshift_hosted_registry_storage_kind=object @@ -571,10 +580,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # network blocks should be private and should not conflict with network blocks # in your infrastructure that pods may require access to. Can not be changed # after deployment. +# +# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of +# 172.17.0.0/16. Your installation will fail and/or your configuration change will +# cause the Pod SDN or Cluster SDN to fail. +# +# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting +# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS +# environment variable located in /etc/sysconfig/docker-network. #osm_cluster_network_cidr=10.128.0.0/14 #openshift_portal_net=172.30.0.0/16 - # ExternalIPNetworkCIDRs controls what values are acceptable for the # service external IP field. If empty, no externalIP may be set. It # may contain a list of CIDRs which are checked for access. 
If a CIDR @@ -754,7 +770,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_node_env_vars={"ENABLE_HTTP2": "true"} # Enable API service auditing, available as of 1.3 -#openshift_master_audit_config={"basicAuditEnabled": true} +#openshift_master_audit_config={"enabled": true} # Enable origin repos that point at Centos PAAS SIG, defaults to true, only used # by deployment_type=origin diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index f5e0de1b0..05945f586 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -78,6 +78,18 @@ openshift_release=v3.5 #openshift_docker_blocked_registries=registry.hacker.com # Disable pushing to dockerhub #openshift_docker_disable_push_dockerhub=True +# Use Docker inside a System Container. Note that this is a tech preview and should +# not be used to upgrade! +# The following options for docker are ignored: +# - docker_version +# - docker_upgrade +# The following options must not be used +# - openshift_docker_options +#openshift_docker_use_system_container=False +# Force the registry to use for the system container. By default the registry +# will be built off of the deployment type and ansible_distribution. Only +# use this option if you are sure you know what you are doing! +#openshift_docker_systemcontainer_image_registry_override="registry.example.com" # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" @@ -426,9 +438,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 #openshift_hosted_registry_storage_volume_size=10Gi # -# Native GlusterFS Registry Storage -#openshift_hosted_registry_storage_kind=glusterfs -# # AWS S3 # # S3 bucket must already exist. @@ -572,10 +581,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # network blocks should be private and should not conflict with network blocks # in your infrastructure that pods may require access to. Can not be changed # after deployment. +# +# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of +# 172.17.0.0/16. Your installation will fail and/or your configuration change will +# cause the Pod SDN or Cluster SDN to fail. +# +# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting +# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS +# environment variable located in /etc/sysconfig/docker-network. #osm_cluster_network_cidr=10.128.0.0/14 #openshift_portal_net=172.30.0.0/16 - # ExternalIPNetworkCIDRs controls what values are acceptable for the # service external IP field. If empty, no externalIP may be set. It # may contain a list of CIDRs which are checked for access. If a CIDR @@ -755,7 +771,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_node_env_vars={"ENABLE_HTTP2": "true"} # Enable API service auditing, available as of 3.2 -#openshift_master_audit_config={"basicAuditEnabled": true} +#openshift_master_audit_config={"enabled": true} # Validity of the auto-generated OpenShift certificates in days. # See also openshift_hosted_registry_cert_expire_days above. 
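
The header comments in the new `hosts.byo.native-glusterfs.example` inventory above describe two ways to consume it. A minimal sketch of the corresponding invocations follows; the copy destination and any host or device names are placeholders to be adapted, while the playbook paths are the ones named in the example's own comments:

```
# Copy the example and edit hosts/devices to match your environment (the
# destination path here is arbitrary).
cp inventory/byo/hosts.byo.native-glusterfs.example /etc/ansible/glusterfs-hosts

# Deploy a new cluster with natively hosted, containerized GlusterFS storage
# backing the hosted registry:
ansible-playbook -i /etc/ansible/glusterfs-hosts playbooks/byo/config.yml

# Or deploy GlusterFS storage on an existing cluster; the registry backend
# volume is created, but the hosted registry must then be pointed at it
# separately:
ansible-playbook -i /etc/ansible/glusterfs-hosts playbooks/byo/openshift-glusterfs/config.yml
```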
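
The SDN warning added to both example inventories also spells out a host-level workaround for overlapping subnets. A rough sketch of that change is shown below; the `192.168.2.1/24` range is taken directly from the inventory comment and is only an example value:

```
# /etc/sysconfig/docker-network -- only needed when osm_cluster_network_cidr
# or openshift_portal_net must overlap the default docker0 subnet
# (172.17.0.0/16).
DOCKER_NETWORK_OPTIONS='--bip=192.168.2.1/24'
```

Docker would normally need to be restarted on each affected host for the new bridge address to take effect.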
diff --git a/library/kubeclient_ca.py b/library/kubeclient_ca.py index 163624a76..a89a5574f 100644 --- a/library/kubeclient_ca.py +++ b/library/kubeclient_ca.py @@ -1,7 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - ''' kubeclient_ca ansible module ''' import base64 diff --git a/library/modify_yaml.py b/library/modify_yaml.py index 8706e80c2..9b8f9ba33 100755 --- a/library/modify_yaml.py +++ b/library/modify_yaml.py @@ -1,7 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - ''' modify_yaml ansible module ''' import yaml diff --git a/lookup_plugins/oo_option.py b/lookup_plugins/oo_option.py index 7909d0092..4581cb6b8 100644 --- a/lookup_plugins/oo_option.py +++ b/lookup_plugins/oo_option.py @@ -1,7 +1,5 @@ #!/usr/bin/env python2 # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - ''' oo_option lookup plugin for openshift-ansible diff --git a/openshift-ansible.spec b/openshift-ansible.spec index aad8bbff3..016e86b85 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -9,7 +9,7 @@ %global __requires_exclude ^/usr/bin/ansible-playbook$ Name: openshift-ansible -Version: 3.6.30 +Version: 3.6.67 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -17,7 +17,7 @@ URL: https://github.com/openshift/openshift-ansible Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz BuildArch: noarch -Requires: ansible >= 2.2.0.0-1 +Requires: ansible >= 2.2.2.0 Requires: python2 Requires: python-six Requires: tar @@ -25,6 +25,7 @@ Requires: openshift-ansible-docs = %{version} Requires: java-1.8.0-openjdk-headless Requires: httpd-tools Requires: libselinux-python +Requires: python-passlib %description Openshift and Atomic Enterprise Ansible @@ -273,6 +274,202 @@ Atomic OpenShift Utilities includes %changelog +* Wed May 10 2017 Scott Dodson <sdodson@redhat.com> 3.6.67-1 +- byo: correct option name (gscrivan@redhat.com) +- Fail if rpm version != docker image version (jchaloup@redhat.com) +- Perform package upgrades in one transaction (sdodson@redhat.com) +- Properly fail if OpenShift RPM version is undefined (rteague@redhat.com) + +* Wed May 10 2017 Scott Dodson <sdodson@redhat.com> 3.6.66-1 +- Fix issue with Travis-CI using old pip version (rteague@redhat.com) +- Remove vim configuration from Python files (rhcarvalho@gmail.com) +- Use local variables for daemon.json template (smilner@redhat.com) +- Fix additional master cert & client config creation. 
(abutcher@redhat.com) + +* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.62-1 +- + +* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.61-1 +- + +* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.60-1 +- + +* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.59-1 +- Updating logging and metrics to restart api, ha and controllers when updating + master config (ewolinet@redhat.com) +- Adding defaults for es_indices (ewolinet@redhat.com) +- Updating logic for generating pvcs and their counts to prevent reuse when + looping (ewolinet@redhat.com) + +* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.58-1 +- Moving Dockerfile content to images dir (jupierce@redhat.com) + +* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.57-1 +- + +* Sun May 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.56-1 +- + +* Sat May 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.55-1 +- Fix 1448368, and some other minors issues (ghuang@redhat.com) +- mux startup is broken without this fix (rmeggins@redhat.com) +- Dockerfile: create symlink for /opt/app-root/src (gscrivan@redhat.com) +- docs: Add basic system container dev docs (smilner@redhat.com) +- installer: Add system container variable for log saving (smilner@redhat.com) +- installer: support running as a system container (gscrivan@redhat.com) + +* Fri May 05 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.54-1 +- Allow oc_ modules to pass unicode results (rteague@redhat.com) +- Ensure repo cache is clean on the first run (rteague@redhat.com) +- move etcdctl.yml from etcd to etcd_common role (jchaloup@redhat.com) +- Modified pick from release-1.5 for updating hawkular htpasswd generation + (ewolinet@redhat.com) + +* Thu May 04 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.53-1 +- Correctly setting the primary and replica shard count settings + (ewolinet@redhat.com) +- System container docker (smilner@redhat.com) +- Stop logging AWS credentials in master role. (dgoodwin@redhat.com) +- Remove set operations from openshift_master_certificates iteration. + (abutcher@redhat.com) +- Refactor system fact gathering to avoid dictionary size change during + iteration. (abutcher@redhat.com) +- Refactor secret generation for python3. (abutcher@redhat.com) +- redhat-ci: use requirements.txt (jlebon@redhat.com) + +* Wed May 03 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.52-1 +- Making mux with_items list evaluate as empty if didnt get objects before + (ewolinet@redhat.com) +- etcd Upgrade Refactor (rteague@redhat.com) +- v3.3 Upgrade Refactor (rteague@redhat.com) +- v3.4 Upgrade Refactor (rteague@redhat.com) +- v3.5 Upgrade Refactor (rteague@redhat.com) +- v3.6 Upgrade Refactor (rteague@redhat.com) +- Fix variants for v3.6 (rteague@redhat.com) +- Normalizing groups. 
(kwoodson@redhat.com) +- Use openshift_ca_host's hostnames to sign the CA (sdodson@redhat.com) + +* Tue May 02 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.51-1 +- Remove std_include from playbooks/byo/rhel_subscribe.yml + (abutcher@redhat.com) +- Adding way to add labels and nodeselectors to logging project + (ewolinet@redhat.com) + +* Tue May 02 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.50-1 +- Don't double quote when conditions (sdodson@redhat.com) +- Remove jinja template delimeters from when conditions (sdodson@redhat.com) +- move excluder upgrade validation tasks under openshift_excluder role + (jchaloup@redhat.com) +- Fix test compatibility with OpenSSL 1.1.0 (pierre- + louis.bonicoli@libregerbil.fr) + +* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.49-1 +- Warn users about conflicts with docker0 CIDR range (lpsantil@gmail.com) +- Bump ansible rpm dependency to 2.2.2.0 (sdodson@redhat.com) + +* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.48-1 +- + +* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.47-1 +- + +* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.46-1 +- Contrib: Hook to verify modules match assembled fragments + (tbielawa@redhat.com) + +* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.45-1 +- + +* Sun Apr 30 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.44-1 +- Refactor etcd roles (jchaloup@redhat.com) + +* Sat Apr 29 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.43-1 +- Document the Pull Request process (rhcarvalho@gmail.com) +- Add Table of Contents (rhcarvalho@gmail.com) +- Improve Contribution Guide (rhcarvalho@gmail.com) +- Replace absolute with relative URLs (rhcarvalho@gmail.com) +- Move repo structure to a separate document (rhcarvalho@gmail.com) +- Remove outdated information about PRs (rhcarvalho@gmail.com) +- Move link to BUILD.md to README.md (rhcarvalho@gmail.com) +- Adding checks for starting mux for 2.2.0 (ewolinet@redhat.com) +- Fix OpenShift registry deployment on OSE 3.2 (lhuard@amadeus.com) + +* Fri Apr 28 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.42-1 +- Fix certificate check Job examples (pep@redhat.com) +- Add python-boto requirement (pep@redhat.com) + +* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.41-1 +- Add bool for proper conditional handling (rteague@redhat.com) + +* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.40-1 +- Fix cluster creation with `bin/cluster` when there’s no glusterfs node + (lhuard@amadeus.com) + +* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.39-1 +- Move container build instructions to BUILD.md (pep@redhat.com) +- Elaborate container image usage instructions (pep@redhat.com) + +* Wed Apr 26 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.38-1 +- .redhat-ci.yml: also publish journal logs (jlebon@redhat.com) +- Standardize all Origin versioning on 3.6 (rteague@redhat.com) +- integration tests: add CI scripts (lmeyer@redhat.com) +- preflight int tests: define image builds to support tests (lmeyer@redhat.com) +- preflight int tests: generalize; add tests (lmeyer@redhat.com) +- Add stub of preflight integration tests (rhcarvalho@gmail.com) +- Move Python unit tests to subdirectory (rhcarvalho@gmail.com) +- Revert "Add /etc/sysconfig/etcd to etcd_container" (sdodson@redhat.com) +- Replace original router cert variable names. 
(abutcher@redhat.com) +- oc_obj: Allow for multiple kinds in delete (jarrpa@redhat.com) +- Update v1.5 content (sdodson@redhat.com) +- Update v1.6 content (sdodson@redhat.com) +- Make the rhel_subscribe role subscribe to OSE 3.5 channel by default + (lhuard@amadeus.com) +- Addressing yamllint (ewolinet@redhat.com) +- Updating kibana-proxy secret key for server-tls entry (ewolinet@redhat.com) +- Pick from issue3896 (ewolinet@redhat.com) +- Cleanup comments and remove extraneous tasks (sdodson@redhat.com) +- Store backups in /var/lib/etcd/openshift-backup (sdodson@redhat.com) +- Create member/snap directory encase it doesn't exist (sdodson@redhat.com) +- Copy v3 data dir when performing backup (sdodson@redhat.com) + +* Tue Apr 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.37-1 +- Differentiate between service serving router certificate and custom + openshift_hosted_router_certificate when replacing the router certificate. + (abutcher@redhat.com) + +* Tue Apr 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.36-1 +- Update swap disable tasks (rteague@redhat.com) +- Removing resource version to remove object conflicts caused by race + conditions. (kwoodson@redhat.com) +- cast openshift_logging_use_mux_client to bool (rmeggins@redhat.com) +- mux does not require privileged, only hostmount-anyuid (rmeggins@redhat.com) +- Switched Heapster to use certificates generated by OpenShift + (juraci@kroehling.de) +- Use metrics and logging deployer tag v3.4 for enterprise (sdodson@redhat.com) +- Remove v1.5 and v1.6 metrics/logging templates (sdodson@redhat.com) + +* Sun Apr 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.35-1 +- + +* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.34-1 +- GlusterFS: provide default for groups.oo_glusterfs_to_config in with_items + (jarrpa@redhat.com) + +* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.33-1 +- Adding module calls instead of command for idempotency. (kwoodson@redhat.com) +- Use return_value when value is constant (pierre- + louis.bonicoli@libregerbil.fr) +- Add missing mock for locate_oc_binary method (pierre- + louis.bonicoli@libregerbil.fr) + +* Fri Apr 21 2017 Scott Dodson <sdodson@redhat.com> 3.6.32-1 +- Don't check excluder versions when they're not enabled (sdodson@redhat.com) + +* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.31-1 +- Stop all services prior to upgrading, start all services after + (sdodson@redhat.com) + * Thu Apr 20 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.30-1 - Add Ansible syntax checks to tox (rteague@redhat.com) - Add /etc/sysconfig/etcd to etcd_container (me@fale.io) diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 81c1ee653..64f861c6a 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -20,7 +20,7 @@ pre_tasks: - fail: msg: "This playbook requires {{item}} to be set." - when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_volume_size - cli_device_name diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index f638fab83..507ac0f05 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -33,7 +33,7 @@ pre_tasks: - fail: msg: "This playbook requires {{item}} to be set." 
- when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_tag_name - cli_volume_size diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml index d988a28b0..3059d3dc5 100755 --- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml @@ -24,7 +24,7 @@ pre_tasks: - fail: msg: "This playbook requires {{item}} to be set." - when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_docker_device diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml index b6dde357e..5e12cd181 100644 --- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -25,7 +25,7 @@ - fail: msg: "This playbook requires {{item}} to be set." - when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_tag_name diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py index daff68fbe..cacd0b0f3 100644 --- a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py +++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Custom filters for use in openshift-ansible ''' diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml index 598f1966d..eb8440d1b 100644 --- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -42,7 +42,7 @@ pre_tasks: - fail: msg: "This playbook requires {{item}} to be set." 
- when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_tag_name - cli_volume_size diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index ffdcd0ce1..beaf20b07 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -125,7 +125,7 @@ - name: Remove flannel package package: name=flannel state=absent when: openshift_use_flannel | default(false) | bool - when: "{{ not is_atomic | bool }}" + when: not is_atomic | bool - shell: systemctl reset-failed changed_when: False @@ -146,7 +146,7 @@ - lbr0 - vlinuxbr - vovsbr - when: "{{ openshift_remove_all | default(true) | bool }}" + when: openshift_remove_all | default(true) | bool - shell: atomic uninstall "{{ item }}"-master-api changed_when: False @@ -239,7 +239,7 @@ changed_when: False failed_when: False with_items: "{{ images_to_delete.results }}" - when: "{{ openshift_uninstall_images | default(True) | bool }}" + when: openshift_uninstall_images | default(True) | bool - name: remove sdn drop files file: @@ -252,7 +252,7 @@ - /etc/sysconfig/openshift-node - /etc/sysconfig/openvswitch - /run/openshift-sdn - when: "{{ openshift_remove_all | default(True) | bool }}" + when: openshift_remove_all | default(True) | bool - find: path={{ item }} file_type=file register: files diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml index fbaf81dec..119df9c7d 100644 --- a/playbooks/aws/openshift-cluster/cluster_hosts.yml +++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml @@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([]) g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}" +g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}" + g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}" g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}" diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index 268a65415..9d086b7b6 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -15,6 +15,8 @@ g_nfs_hosts: "{{ groups.nfs | default([]) }}" g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}" +g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}" + g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts) diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml index 2785dcc3b..2a725510a 100644 --- a/playbooks/byo/openshift-cluster/initialize_groups.yml +++ b/playbooks/byo/openshift-cluster/initialize_groups.yml @@ -8,17 +8,3 @@ - always tasks: - include_vars: cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: no - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: cluster_hosts.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index 
690b663f4..697a18c4d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -4,106 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index fca2c04f3..4d284c279 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -13,101 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index d171ac3cd..180a2821f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -6,103 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the 
normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml index 217163802..8cce91b3f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -4,104 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: 
../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index d21c195bf..8e5d0f5f9 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -13,101 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: 
oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml index 7bb66611c..d5329b858 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
- when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml index f0900e04e..f44d55ad2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -4,110 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. -# So it is necassary to run the play after running disable_excluder.yml. 
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index e8d834a04..2377713fa 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -13,105 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - 
openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index c2a4debc8..5b3f6ab06 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
- when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml index 763e79e01..40120b3e8 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -4,110 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. -# So it is necassary to run the play after running disable_excluder.yml. -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
- -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 7a1377be2..408a4c631 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -13,105 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 065746493..b5f42b804 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting 
openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml new file mode 100644 index 000000000..dd3f47a4d --- /dev/null +++ b/playbooks/byo/openshift-etcd/config.yml @@ -0,0 +1,14 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-etcd/config.yml + vars: + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: "{{ debug_level | default(2) }}" + openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md new file mode 100644 index 000000000..f62aea229 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/README.md @@ -0,0 +1,98 @@ +# OpenShift GlusterFS Playbooks + +These playbooks are intended to enable the use of GlusterFS volumes by pods in +OpenShift. While they try to provide a sane set of defaults they do cover a +variety of scenarios and configurations, so read carefully. :) + +## Playbook: config.yml + +This is the main playbook that integrates GlusterFS into a new or existing +OpenShift cluster. It will also, if specified, configure a hosted Docker +registry with GlusterFS backend storage. + +This playbook requires the `glusterfs` group to exist in the Ansible inventory +file. The hosts in this group are the nodes of the GlusterFS cluster. + + * If this is a newly configured cluster each host must have a + `glusterfs_devices` variable defined, each of which must be a list of block + storage devices intended for use only by the GlusterFS cluster. 
If this is
+  also an external GlusterFS cluster, you must specify
+  `openshift_storage_glusterfs_is_native=False`. If the cluster is to be
+  managed by an external heketi service, you must also specify
+  `openshift_storage_glusterfs_heketi_is_native=False` and
+  `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi
+  service. All of these variables are specified in `[OSEv3:vars]`.
+ * If this is an existing cluster, you do not need to specify a list of block
+  devices, but you must specify the following variables in `[OSEv3:vars]`:
+  * `openshift_storage_glusterfs_is_missing=False`
+  * `openshift_storage_glusterfs_heketi_is_missing=False`
+
+By default, pods for a native GlusterFS cluster will be created in the
+`default` namespace. To change this, specify
+`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`.
+
+To configure the deployment of a Docker registry with GlusterFS backend
+storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in
+`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the
+registry, specify a `glusterfs_registry` group that is populated, like the
+`glusterfs` group, with the nodes for the separate cluster. If no
+`glusterfs_registry` group is specified, the cluster defined by the
+`glusterfs` group will be used.
+
+To swap an existing hosted registry's backend storage for a GlusterFS volume,
+specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To
+additionally copy any existing contents from the existing hosted registry,
+specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`.
+
+**NOTE:** For each namespace that is to have access to GlusterFS volumes, an
+Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding
+Service resource must be created. If dynamic provisioning using StorageClasses
+is configured, these resources are created automatically in the namespaces that
+require them. This playbook also takes care of creating these resources in the
+namespaces used for deployment.
+
+An example of a minimal inventory file:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+node0
+node1
+node2
+
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/sdb" ]'
+node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]'
+node2 glusterfs_devices='[ "/dev/sdd" ]'
+```
+
+## Playbook: registry.yml
+
+This playbook is intended for admins who want to deploy a hosted Docker
+registry with GlusterFS backend storage on an existing OpenShift cluster. It
+has all the same requirements and behaviors as `config.yml`.
+
+## Role: openshift_storage_glusterfs
+
+The bulk of the work is done by the `openshift_storage_glusterfs` role. This
+role can handle the deployment of GlusterFS (if it is to be hosted on the
+OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone),
+and (if specified) integration as backend storage for a hosted Docker registry.
+
+See the documentation in the role's directory for further details.
+
+## Role: openshift_hosted
+
+The `openshift_hosted` role recognizes `glusterfs` as a possible storage
+backend for a hosted Docker registry. It will also, if configured, handle the
+swap of an existing registry's backend storage to a GlusterFS volume.
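+
+As a rough sketch of how the registry-related options described above fit
+together (the host names and device paths below are placeholders, not
+defaults), a registry-only GlusterFS cluster with a storage swap for an
+existing hosted registry might be declared like this:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs_registry
+
+[OSEv3:vars]
+openshift_hosted_registry_storage_kind=glusterfs
+openshift_hosted_registry_storage_glusterfs_swap=True
+openshift_hosted_registry_storage_glusterfs_swapcopy=True
+
+[glusterfs_registry]
+registry-node0 glusterfs_devices='[ "/dev/sde" ]'
+registry-node1 glusterfs_devices='[ "/dev/sde" ]'
+registry-node2 glusterfs_devices='[ "/dev/sde" ]'
+```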
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml new file mode 100644 index 000000000..3f11f3991 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/config.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/config.yml diff --git a/playbooks/byo/openshift-glusterfs/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-glusterfs/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..6ee6febdb --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/registry.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/registry.yml diff --git a/playbooks/byo/openshift-glusterfs/roles b/playbooks/byo/openshift-glusterfs/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml index c5f05d0f0..04a55308a 100644 --- a/playbooks/byo/openshift-preflight/check.yml +++ b/playbooks/byo/openshift-preflight/check.yml @@ -1,5 +1,7 @@ --- -- hosts: OSEv3 +- include: ../openshift-cluster/initialize_groups.yml + +- hosts: g_all_hosts name: run OpenShift health checks roles: - openshift_health_checker diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index 3b10323d6..75b606e61 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -8,7 +8,7 @@ - always - name: Gather Cluster facts - hosts: OSEv3 + hosts: g_all_hosts roles: - openshift_facts tasks: diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 777743def..aec87cf82 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -3,12 +3,8 @@ tags: - always -- include: ../common/openshift-cluster/std_include.yml - tags: - - always - - name: Subscribe hosts, update repos and update OS packages - hosts: l_oo_all_hosts + hosts: g_all_hosts roles: - role: rhel_subscribe when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 6aac70f63..46932b27f 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -5,33 +5,40 @@ become: no gather_facts: no tasks: - - fail: + - name: Evaluate groups - g_etcd_hosts required + fail: msg: This playbook requires g_etcd_hosts to be set - when: "{{ g_etcd_hosts is not defined }}" + when: g_etcd_hosts is not defined - - fail: + - name: Evaluate groups - g_master_hosts or g_new_master_hosts required + fail: msg: This playbook requires g_master_hosts or g_new_master_hosts to be set - when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}" + when: g_master_hosts is not defined or g_new_master_hosts is not defined - - fail: + - name: Evaluate groups - g_node_hosts or g_new_node_hosts required + fail: msg: This playbook requires g_node_hosts or g_new_node_hosts to be set - when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}" + when: g_node_hosts is not defined or g_new_node_hosts is not defined - - fail: + - name: Evaluate groups - g_lb_hosts required + fail: msg: This playbook requires g_lb_hosts to be set - when: "{{ g_lb_hosts is not defined }}" + when: g_lb_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts required + fail: msg: This playbook requires g_nfs_hosts to be set - when: "{{ g_nfs_hosts is not defined }}" + when: g_nfs_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts is single host + fail: msg: The nfs group must be limited to one host - when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}" + when: (groups[g_nfs_hosts] | default([])) | length > 1 - - fail: + - name: Evaluate groups - g_glusterfs_hosts required + fail: msg: This playbook requires g_glusterfs_hosts to be set - when: "{{ g_glusterfs_hosts is not defined }}" + when: g_glusterfs_hosts is not defined - name: Evaluate oo_all_hosts add_host: @@ -51,13 +58,13 @@ with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}" changed_when: no - - name: Evaluate oo_etcd_to_config + - name: Evaluate oo_first_master add_host: - name: 
"{{ item }}" - groups: oo_etcd_to_config + name: "{{ g_master_hosts[0] }}" + groups: oo_first_master ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_etcd_hosts | default([]) }}" + when: g_master_hosts|length > 0 changed_when: no - name: Evaluate oo_masters_to_config @@ -69,41 +76,59 @@ with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" changed_when: no - - name: Evaluate oo_nodes_to_config + - name: Evaluate oo_etcd_to_config add_host: name: "{{ item }}" - groups: oo_nodes_to_config + groups: oo_etcd_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" + with_items: "{{ g_etcd_hosts | default([]) }}" changed_when: no - # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is - - name: Add master to oo_nodes_to_config + - name: Evaluate oo_first_etcd add_host: - name: "{{ item }}" - groups: oo_nodes_to_config + name: "{{ g_etcd_hosts[0] }}" + groups: oo_first_etcd ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_master_hosts | default([]) }}" - when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}" + when: g_etcd_hosts|length > 0 changed_when: no - - name: Evaluate oo_first_etcd + # We use two groups one for hosts we're upgrading which doesn't include embedded etcd + # The other for backing up which includes the embedded etcd host, there's no need to + # upgrade embedded etcd that just happens when the master is updated. + - name: Evaluate oo_etcd_hosts_to_upgrade add_host: - name: "{{ g_etcd_hosts[0] }}" - groups: oo_first_etcd + name: "{{ item }}" + groups: oo_etcd_hosts_to_upgrade + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" + changed_when: False + + - name: Evaluate oo_etcd_hosts_to_backup + add_host: + name: "{{ item }}" + groups: oo_etcd_hosts_to_backup + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" + changed_when: False + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - when: "{{ g_etcd_hosts|length > 0 }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" changed_when: no - - name: Evaluate oo_first_master + # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is + - name: Add master to oo_nodes_to_config add_host: - name: "{{ g_master_hosts[0] }}" - groups: oo_first_master + name: "{{ item }}" + groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - when: "{{ g_master_hosts|length > 0 }}" + with_items: "{{ g_master_hosts | default([]) }}" + when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool changed_when: no - name: Evaluate oo_lb_to_config @@ -130,5 +155,5 @@ groups: oo_glusterfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_glusterfs_hosts | default([]) }}" + with_items: "{{ g_glusterfs_hosts | 
union(g_glusterfs_registry_hosts) | default([]) }}" changed_when: no diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 07b38920f..f4e52869e 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,13 +1,14 @@ --- # NOTE: requires openshift_facts be run - name: Verify compatible yum/subscription-manager combination - hosts: l_oo_all_hosts + hosts: oo_all_hosts gather_facts: no tasks: # See: # https://bugzilla.redhat.com/show_bug.cgi?id=1395047 # https://bugzilla.redhat.com/show_bug.cgi?id=1282961 # https://github.com/openshift/openshift-ansible/issues/1138 + # Consider the repoquery module for this work - name: Check for bad combinations of yum and subscription-manager command: > {{ repoquery_cmd }} --installed --qf '%{version}' "yum" @@ -16,7 +17,7 @@ when: not openshift.common.is_atomic | bool - fail: msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. - when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout" + when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout - name: Determine openshift_version to configure on first master hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml index a7b614341..9f14f2d69 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml @@ -67,7 +67,66 @@ service.alpha.openshift.io/serving-cert-secret-name=router-certs --config={{ mktemp.stdout }}/admin.kubeconfig -n default - when: l_router_dc.rc == 0 and 'router-certs' in router_secrets + when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined + + - block: + - assert: + that: + - "'certfile' in openshift_hosted_router_certificate" + - "'keyfile' in openshift_hosted_router_certificate" + - "'cafile' in openshift_hosted_router_certificate" + msg: |- + openshift_hosted_router_certificate has been set in the inventory but is + missing one or more required keys. Ensure that 'certfile', 'keyfile', + and 'cafile' keys have been specified for the openshift_hosted_router_certificate + inventory variable. + + - name: Read router certificate and key + become: no + local_action: + module: slurp + src: "{{ item }}" + register: openshift_router_certificate_output + # Defaulting dictionary keys to none to avoid deprecation warnings + # (future fatal errors) during template evaluation. Dictionary keys + # won't be accessed unless openshift_hosted_router_certificate is + # defined and has all keys (certfile, keyfile, cafile) which we + # check above. 
+ with_items: + - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}" + - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}" + - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}" + + - name: Write temporary router certificate file + copy: + content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}" + dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + mode: 0600 + + - name: Write temporary router key file + copy: + content: "{{ (openshift_router_certificate_output.results + | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}" + dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + mode: 0600 + + - name: Replace router-certs secret + shell: > + {{ openshift.common.client_binary }} secrets new router-certs + tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + --type=kubernetes.io/tls + --confirm + -o json | {{ openshift.common.client_binary }} replace -f - + + - name: Remove temporary router certificate and key files + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined - name: Redeploy router command: > diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml index d1e431c5e..02042c1ef 100644 --- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml +++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml @@ -3,14 +3,10 @@ hosts: oo_masters_to_config:oo_nodes_to_config gather_facts: no tasks: - - include: pre/validate_excluder.yml - vars: - #repoquery_cmd: repoquery_cmd - #openshift_upgrade_target: openshift_upgrade_target - excluder: "{{ item }}" - with_items: - - "{{ openshift.common.service_type }}-docker-excluder" - - "{{ openshift.common.service_type }}-excluder" + # verify the excluders can be upgraded + - include_role: + name: openshift_excluder + tasks_from: verify_upgrade # disable excluders based on their status - include_role: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index 7ef79afa9..9d0333ca8 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -1,10 +1,10 @@ --- - name: Backup etcd - hosts: etcd_hosts_to_backup + hosts: oo_etcd_hosts_to_backup vars: embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" roles: - openshift_facts tasks: @@ -13,29 +13,20 @@ role: etcd local_facts: {} when: "'etcd' not in openshift" - - - stat: path=/var/lib/openshift - register: var_lib_openshift - - - stat: path=/var/lib/origin - register: 
var_lib_origin - - - name: Create origin symlink if necessary - file: src=/var/lib/openshift/ dest=/var/lib/origin state=link - when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False + - set_fact: + etcd_backup_dir: "{{ openshift.etcd.etcd_data_dir }}/openshift-backup-{{ backup_tag | default('') }}{{ timestamp }}" # TODO: replace shell module with command and update later checks - # We assume to be using the data dir for all backups. - name: Check available disk space for etcd backup - shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 + shell: df --output=avail -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 register: avail_disk # AUDIT:changed_when: `false` because we are only inspecting # state, not manipulating anything changed_when: false # TODO: replace shell module with command and update later checks - - name: Check current embedded etcd disk usage - shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 + - name: Check current etcd disk usage + shell: du --exclude='*openshift-backup*' -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 register: etcd_disk_usage when: embedded_etcd | bool # AUDIT:changed_when: `false` because we are only inspecting @@ -52,37 +43,42 @@ # For non containerized and non embedded we should have the correct version of # etcd installed already. So don't do anything. # - # For embedded or containerized we need to use the latest because OCP 3.3 uses - # a version of etcd that can only be backed up with etcd-3.x and if it's - # containerized then etcd version may be newer than that on the host so - # upgrade it. - # - # On atomic we have neither yum nor dnf so ansible throws a hard to debug error - # if you use package there, like this: "Could not find a module for unknown." - # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668 + # For containerized installs we now exec into etcd_container # - # TODO - We should refactor all containerized backups to use the containerized - # version of etcd to perform the backup rather than relying on the host's - # binaries. Until we do that we'll continue to have problems backing up etcd - # when atomic host has an older version than the version that's running in the - # container whether that's embedded or not - - name: Install latest etcd for containerized or embedded + # For embedded non containerized we need to ensure we have the latest version + # etcd on the host. 
+ - name: Install latest etcd for embedded package: name: etcd state: latest - when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic + when: + - embedded_etcd | bool + - not openshift.common.is_atomic | bool - name: Generate etcd backup command: > {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }} - --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }} + --backup-dir={{ etcd_backup_dir }} + + # According to the docs change you can simply copy snap/db + # https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6 + - name: Check for v3 data store + stat: + path: "{{ openshift.etcd.etcd_data_dir }}/member/snap/db" + register: v3_db + + - name: Copy etcd v3 data store + command: > + cp -a {{ openshift.etcd.etcd_data_dir }}/member/snap/db + {{ etcd_backup_dir }}/member/snap/ + when: v3_db.stat.exists - set_fact: etcd_backup_complete: True - name: Display location of etcd backup debug: - msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}" + msg: "Etcd backup created in {{ etcd_backup_dir }}" - name: Gate on etcd backup hosts: localhost @@ -91,10 +87,10 @@ tasks: - set_fact: etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.etcd_hosts_to_backup) + | oo_select_keys(groups.oo_etcd_hosts_to_backup) | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" - set_fact: - etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - fail: msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh deleted file mode 120000 index 641e04e44..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh +++ /dev/null @@ -1 +0,0 @@ -../roles/etcd/files/etcdctl.sh
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index fa86d29fb..d9b59edcb 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -5,32 +5,6 @@ # mirrored packages on your own because only the GA and latest versions are # available in the repos. So for Fedora we'll simply skip this, sorry. -- include: ../../evaluate_groups.yml - tags: - - always - -# We use two groups one for hosts we're upgrading which doesn't include embedded etcd -# The other for backing up which includes the embedded etcd host, there's no need to -# upgrade embedded etcd that just happens when the master is updated. -- name: Evaluate additional groups for etcd - hosts: localhost - connection: local - become: no - tasks: - - name: Evaluate etcd_hosts_to_upgrade - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_upgrade - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" - changed_when: False - - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False - - name: Backup etcd before upgrading anything include: backup.yml vars: @@ -38,9 +12,11 @@ when: openshift_etcd_backup | default(true) | bool - name: Drop etcdctl profiles - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - - include: roles/etcd/tasks/etcdctl.yml + - include_role: + name: etcd_common + tasks_from: etcdctl.yml - name: Perform etcd upgrade include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index a9b5b94e6..45e301315 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -1,6 +1,6 @@ --- - name: Determine etcd version - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - name: Record RPM based etcd version command: rpm -qa --qf '%{version}' etcd\* @@ -43,7 +43,7 @@ # I really dislike this copy/pasta but I wasn't able to find a way to get it to loop # through hosts, then loop through tasks only when appropriate - name: Upgrade to 2.1 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: '2.1' @@ -52,7 +52,7 @@ when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool - name: Upgrade RPM hosts to 2.2 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: '2.2' @@ -61,7 +61,7 @@ when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool - name: Upgrade containerized hosts to 2.2.5 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: 2.2.5 @@ -70,7 +70,7 @@ when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool - name: Upgrade RPM hosts to 2.3 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: 
upgrade_version: '2.3' @@ -79,7 +79,7 @@ when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool - name: Upgrade containerized hosts to 2.3.7 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: 2.3.7 @@ -88,7 +88,7 @@ when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool - name: Upgrade RPM hosts to 3.0 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: '3.0' @@ -97,7 +97,7 @@ when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool - name: Upgrade containerized hosts to etcd3 image - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: 3.0.15 @@ -106,7 +106,7 @@ when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool - name: Upgrade fedora to latest - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 tasks: - include: fedora_tasks.yml diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index cbf6d58b3..0f421928b 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -10,17 +10,6 @@ - include: ../initialize_facts.yml -- name: Ensure clean repo cache in the event repos have been changed manually - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - name: Clean package cache - command: "{{ ansible_pkg_mgr }} clean all" - when: not openshift.common.is_atomic | bool - args: - warn: no - - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts tasks: diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py index 673f11889..4eac8b067 100755 --- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py +++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py @@ -1,7 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - """Ansible module for modifying OpenShift configs during an upgrade""" import os diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index c83923dae..6a9f88707 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -1,21 +1,13 @@ --- - name: Verify upgrade targets hosts: oo_masters_to_config:oo_nodes_to_upgrade - vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - pre_tasks: - - fail: + + tasks: + - name: Fail when OpenShift is not installed + fail: msg: Verify OpenShift is already installed when: openshift.common.version is not defined - - fail: - msg: Verify the correct version was found - when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version - - - set_fact: - g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - when: not 
openshift.common.is_containerized | bool
-
   - name: Verify containers are available for upgrade
     command: >
       docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
@@ -23,19 +15,31 @@
     changed_when: "'Downloaded newer image' in pull_result.stdout"
     when: openshift.common.is_containerized | bool
 
-  - name: Check latest available OpenShift RPM version
-    command: >
-      {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
-    failed_when: false
-    changed_when: false
-    register: avail_openshift_version
-    when: not openshift.common.is_containerized | bool
+  - when: not openshift.common.is_containerized | bool
+    block:
+    - name: Check latest available OpenShift RPM version
+      command: >
+        {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+      failed_when: false
+      changed_when: false
+      register: avail_openshift_version
 
-  - name: Verify OpenShift RPMs are available for upgrade
-    fail:
-      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
-    when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+    - name: Fail when unable to determine available OpenShift RPM version
+      fail:
+        msg: "Unable to determine available OpenShift RPM version"
+      when:
+      - avail_openshift_version.stdout == ''
 
-  - fail:
+    - name: Verify OpenShift RPMs are available for upgrade
+      fail:
+        msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+      when:
+      - not avail_openshift_version | skipped
+      - avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+    - name: Fail when openshift version does not meet minimum requirement for Origin upgrade
+      fail:
       msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
-    when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
+      when:
+      - deployment_type == 'origin'
+      - openshift.common.version | version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 03ac02e9f..164baca81 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,27 +1,39 @@
 ---
-# We verified latest rpm available is suitable, so just yum update.
+# When we update package "a-${version}" and a requires b >= ${version}, if we
+# don't specify the version of b, yum will choose the latest version of b
+# available, and the whole set of dependencies ends up at the latest version.
+# Since the package module, unlike the yum module, doesn't flatten a list
+# of packages into one transaction, we need to do that explicitly. The Ansible
+# core team tells us not to rely on yum module transaction flattening anyway.
+ +# TODO: If the sdn package isn't already installed this will install it, we +# should fix that -# Master package upgrade ends up depending on node and sdn packages, we need to be explicit -# with all versions to avoid yum from accidentally jumping to something newer than intended: - name: Upgrade master packages - package: name={{ item }} state=present - when: component == "master" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + package: name={{ master_pkgs | join(',') }} state=present + vars: + master_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "master" + - not openshift.common.is_atomic | bool - name: Upgrade node packages - package: name={{ item }} state=present - when: component == "node" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - -- name: Ensure python-yaml present for config upgrade - package: name=PyYAML state=present - when: not openshift.common.is_atomic | bool + package: name={{ node_pkgs | join(',') }} state=present + vars: + node_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "node" + - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c6e799261..0ad934d2d 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -2,17 +2,6 @@ ############################################################################### # Upgrade Masters ############################################################################### -- name: Evaluate additional groups for upgrade - hosts: localhost - connection: local - become: no - tasks: - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. 
This causes problems for the etcd data dir fact detection diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml index 88f2ddc78..83d2cec81 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml @@ -63,12 +63,12 @@ - block: - debug: msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}" + when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] - debug: msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}" + when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates + when: openshift_master_scheduler_predicates | default(none) is not none # Handle cases where openshift_master_predicates is not defined - block: @@ -87,7 +87,7 @@ when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}" - when: "{{ openshift_master_scheduler_predicates | default(none) is none }}" + when: openshift_master_scheduler_predicates | default(none) is none # Upgrade priorities @@ -120,12 +120,12 @@ - block: - debug: msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}" + when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] - debug: msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}" + when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities + when: openshift_master_scheduler_priorities | default(none) is not none # Handle cases where openshift_master_priorities is not defined - block: @@ -144,7 +144,7 @@ when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}" - when: "{{ openshift_master_scheduler_priorities | default(none) is none }}" + when: openshift_master_scheduler_priorities | default(none) is none # Update scheduler diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml index 68c71a132..d69472fad 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml @@ -53,7 +53,7 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'admissionConfig.pluginConfig' yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "{{ 'admission_plugin_config' in openshift.master }}" + when: "'admission_plugin_config' in openshift.master" - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml new file mode 100644 index 000000000..be18c1edd --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -0,0 +1,107 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml new file mode 100644 index 000000000..20dffb44b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -0,0 +1,111 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this 
point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml new file mode 100644 index 000000000..14aaf70d6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -0,0 +1,106 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. 
At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml index 43c2ffcd4..ed89dbe8d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml @@ -3,7 +3,7 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'admissionConfig.pluginConfig' yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "{{ 'admission_plugin_config' in openshift.master }}" + when: "'admission_plugin_config' in openshift.master" - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml new file mode 100644 index 000000000..5d6455bef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -0,0 +1,105 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | 
oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml new file mode 100644 index 000000000..c76920586 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -0,0 +1,111 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml new file mode 100644 index 000000000..f397f6015 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -0,0 +1,104 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml new file mode 100644 index 000000000..7cedfb1ca --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -0,0 +1,111 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. +# So it is necessary to run the play after running disable_excluder.yml. +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml + +- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml new file mode 100644 index 000000000..0198074ed --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -0,0 +1,115 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
+ +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../post_control_plane.yml + +- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml new file mode 100644 index 000000000..2b16875f4 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -0,0 +1,104 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
+ when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml new file mode 100644 index 000000000..4604bdc8b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -0,0 +1,111 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. +# So it is necessary to run the play after running disable_excluder.yml. +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running.
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml + +- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml new file mode 100644 index 000000000..a09097ed9 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -0,0 +1,115 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which 
trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../post_control_plane.yml + +- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml new file mode 100644 index 000000000..7640f2116 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -0,0 +1,104 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 75faf5ba8..1efdfb336 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -12,7 +12,9 @@ - service: glusterfs_bricks port: "49152-49251/tcp" roles: - - os_firewall + - role: os_firewall + when: + - openshift_storage_glusterfs_is_native | default(True) - name: Configure GlusterFS hosts: oo_first_master diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..80cf7529e --- /dev/null +++ b/playbooks/common/openshift-glusterfs/registry.yml @@ -0,0 +1,49 @@ +--- +- include: config.yml + +- name: Initialize GlusterFS registry PV and PVC vars + hosts: oo_first_master + tags: hosted + tasks: + - set_fact: + glusterfs_pv: [] + glusterfs_pvc: [] + + - set_fact: + glusterfs_pv: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + storage: + glusterfs: + endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" + path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" + readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" + glusterfs_pvc: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + when: openshift.hosted.registry.storage.glusterfs.swap + +- name: Create persistent volumes + hosts: oo_first_master + tags: + - hosted + vars: + persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" + persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" + roles: + - role: openshift_persistent_volumes + when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + +- name: Create Hosted Resources + hosts: oo_first_master + tags: + - hosted + pre_tasks: + - set_fact: + openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url 
}}" + when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" + roles: + - role: openshift_hosted diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 92f16dc47..ab0045a39 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -51,7 +51,7 @@ changed_when: false - name: Configure docker hosts - hosts: oo_masters_to-config:oo_nodes_to_config + hosts: oo_masters_to_config:oo_nodes_to_config vars: docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml index be050c12c..0014a5dbd 100644 --- a/playbooks/common/openshift-node/network_manager.yml +++ b/playbooks/common/openshift-node/network_manager.yml @@ -1,6 +1,6 @@ --- - name: Install and configure NetworkManager - hosts: l_oo_all_hosts + hosts: oo_all_hosts become: yes tasks: - name: install NetworkManager diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml index 74e2420db..05a58db73 100644 --- a/playbooks/gce/openshift-cluster/cluster_hosts.yml +++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml @@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([]) g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" +g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}" + g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}" diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml index 74e2420db..05a58db73 100644 --- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml +++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml @@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([]) g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" +g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}" + g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}" diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 78581fdfe..ccd29be29 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -14,7 +14,7 @@ url: '{{ image_url }}' sha256sum: '{{ image_sha256 }}' dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}' - when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}' + when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] register: downloaded_image - name: Uncompress xz compressed base cloud image diff --git 
a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml index 98434439c..505f7b3a8 100644 --- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml +++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml @@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([] g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}" +g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}" + g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}" g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}" diff --git a/requirements.txt b/requirements.txt index d00de5ed4..1996a967d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,7 @@ # Versions are pinned to prevent pypi releases arbitrarily breaking # tests with new APIs/semantics. We want to update versions deliberately. ansible==2.2.2.0 +boto==2.45.0 click==6.7 pyOpenSSL==16.2.0 # We need to disable ruamel.yaml for now because of test failures diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml index 65d75cf00..53cecfcc3 100644 --- a/roles/calico/handlers/main.yml +++ b/roles/calico/handlers/main.yml @@ -5,4 +5,6 @@ - name: restart docker become: yes - systemd: name=docker state=restarted + systemd: + name: "{{ openshift.docker.service_name }}" + state: restarted diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml index 97b9762df..0847c92bc 100644 --- a/roles/contiv/tasks/netplugin.yml +++ b/roles/contiv/tasks/netplugin.yml @@ -105,7 +105,7 @@ - name: Docker | Restart docker service: - name: docker + name: "{{ openshift.docker.service_name }}" state: restarted when: docker_updated|changed diff --git a/roles/docker/README.md b/roles/docker/README.md index ea06fd41a..f25ca03cd 100644 --- a/roles/docker/README.md +++ b/roles/docker/README.md @@ -1,7 +1,7 @@ Docker ========= -Ensures docker package is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. +Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. 
Requirements ------------ @@ -11,8 +11,10 @@ Ansible 2.2 Role Variables -------------- -udevw_udevd_dir: location of systemd config for systemd-udevd.service +docker_conf_dir: location of the Docker configuration directory +docker_systemd_dir: location of the systemd directory for Docker docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446) +udevw_udevd_dir: location of systemd config for systemd-udevd.service Dependencies ------------ @@ -26,6 +28,7 @@ Example Playbook roles: - role: docker docker_udev_workaround: "true" + docker_use_system_container: False License ------- diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index 9ccb306fc..7f91afb37 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -2,7 +2,7 @@ - name: restart docker systemd: - name: docker + name: "{{ openshift.docker.service_name }}" state: restarted when: not docker_service_status_changed | default(false) | bool diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml index ad28cece9..cd4083572 100644 --- a/roles/docker/meta/main.yml +++ b/roles/docker/meta/main.yml @@ -11,3 +11,4 @@ galaxy_info: - 7 dependencies: - role: os_firewall +- role: lib_openshift diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index c34700aeb..0c2b16acf 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,119 +1,17 @@ --- -- name: Get current installed Docker version - command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" - when: not openshift.common.is_atomic | bool - register: curr_docker_version - changed_when: false - -- name: Error out if Docker pre-installed but too old - fail: - msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined - -- name: Error out if requested Docker is too old - fail: - msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required." - when: docker_version is defined and docker_version | version_compare('1.9.1', '<') - -# If a docker_version was requested, sanity check that we can install or upgrade to it, and -# no downgrade is required. -- name: Fail if Docker version requested but downgrade is required - fail: - msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') - -# This involves an extremely slow migration process, users should instead run the -# Docker 1.10 upgrade playbook to accomplish this. -- name: Error out if attempting to upgrade Docker across the 1.10 boundary - fail: - msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') - -# Make sure Docker is installed, but does not update a running version. -# Docker upgrades are handled by a separate playbook.
-- name: Install Docker - package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present - when: not openshift.common.is_atomic | bool - -- block: - # Extend the default Docker service unit file when using iptables-services - - name: Ensure docker.service.d directory exists - file: - path: "{{ docker_systemd_dir }}" - state: directory - - - name: Configure Docker service unit file - template: - dest: "{{ docker_systemd_dir }}/custom.conf" - src: custom.conf.j2 - when: not os_firewall_use_firewalld | default(True) | bool +# These tasks dispatch to the proper set of docker tasks based on the +# inventory:openshift_docker_use_system_container variable - include: udev_workaround.yml when: docker_udev_workaround | default(False) | bool -- stat: path=/etc/sysconfig/docker - register: docker_check - -- name: Set registry params - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^{{ item.reg_conf_var }}=.*$' - line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" - when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg - with_items: - - reg_conf_var: ADD_REGISTRY - reg_fact_val: "{{ docker_additional_registries | default(None, true)}}" - reg_flag: --add-registry - - reg_conf_var: BLOCK_REGISTRY - reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}" - reg_flag: --block-registry - - reg_conf_var: INSECURE_REGISTRY - reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}" - reg_flag: --insecure-registry - notify: - - restart docker - -- name: Set Proxy Settings - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^{{ item.reg_conf_var }}=.*$' - line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'" - state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}" - with_items: - - reg_conf_var: HTTP_PROXY - reg_fact_val: "{{ docker_http_proxy | default('') }}" - - reg_conf_var: HTTPS_PROXY - reg_fact_val: "{{ docker_https_proxy | default('') }}" - - reg_conf_var: NO_PROXY - reg_fact_val: "{{ docker_no_proxy | default('') }}" - notify: - - restart docker - when: - - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common' - -- name: Set various Docker options - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^OPTIONS=.*$' - line: "OPTIONS='\ - {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\ - {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\ - {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ - {% if docker_options is defined %} {{ docker_options }}{% endif %}\ - {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'" - when: docker_check.stat.isreg is defined and docker_check.stat.isreg - notify: - - restart docker - -- name: Start the Docker service - systemd: - name: docker - enabled: yes - state: started - daemon_reload: yes - register: start_result - - set_fact: - docker_service_status_changed: start_result | changed + l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}" + +- name: Use Package Docker if Requested + include: package_docker.yml + when: not l_use_system_container -- meta: flush_handlers +- name: Use 
System Container Docker if Requested + include: systemcontainer_docker.yml + when: l_use_system_container diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml new file mode 100644 index 000000000..10fb5772c --- /dev/null +++ b/roles/docker/tasks/package_docker.yml @@ -0,0 +1,116 @@ +--- +- name: Get current installed Docker version + command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" + when: not openshift.common.is_atomic | bool + register: curr_docker_version + changed_when: false + +- name: Error out if Docker pre-installed but too old + fail: + msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined + +- name: Error out if requested Docker is too old + fail: + msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required." + when: docker_version is defined and docker_version | version_compare('1.9.1', '<') + +# If a docker_version was requested, sanity check that we can install or upgrade to it, and +# no downgrade is required. +- name: Fail if Docker version requested but downgrade is required + fail: + msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') + +# This involves an extremely slow migration process, users should instead run the +# Docker 1.10 upgrade playbook to accomplish this. +- name: Error out if attempting to upgrade Docker across the 1.10 boundary + fail: + msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') + +# Make sure Docker is installed, but does not update a running version. +# Docker upgrades are handled by a separate playbook. 
+- name: Install Docker + package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present + when: not openshift.common.is_atomic | bool + +- block: + # Extend the default Docker service unit file when using iptables-services + - name: Ensure docker.service.d directory exists + file: + path: "{{ docker_systemd_dir }}" + state: directory + + - name: Configure Docker service unit file + template: + dest: "{{ docker_systemd_dir }}/custom.conf" + src: custom.conf.j2 + when: not os_firewall_use_firewalld | default(True) | bool + +- stat: path=/etc/sysconfig/docker + register: docker_check + +- name: Set registry params + lineinfile: + dest: /etc/sysconfig/docker + regexp: '^{{ item.reg_conf_var }}=.*$' + line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" + when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg + with_items: + - reg_conf_var: ADD_REGISTRY + reg_fact_val: "{{ docker_additional_registries | default(None, true)}}" + reg_flag: --add-registry + - reg_conf_var: BLOCK_REGISTRY + reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}" + reg_flag: --block-registry + - reg_conf_var: INSECURE_REGISTRY + reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}" + reg_flag: --insecure-registry + notify: + - restart docker + +- name: Set Proxy Settings + lineinfile: + dest: /etc/sysconfig/docker + regexp: '^{{ item.reg_conf_var }}=.*$' + line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'" + state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}" + with_items: + - reg_conf_var: HTTP_PROXY + reg_fact_val: "{{ docker_http_proxy | default('') }}" + - reg_conf_var: HTTPS_PROXY + reg_fact_val: "{{ docker_https_proxy | default('') }}" + - reg_conf_var: NO_PROXY + reg_fact_val: "{{ docker_no_proxy | default('') }}" + notify: + - restart docker + when: + - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common' + +- name: Set various Docker options + lineinfile: + dest: /etc/sysconfig/docker + regexp: '^OPTIONS=.*$' + line: "OPTIONS='\ + {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\ + {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\ + {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ + {% if docker_options is defined %} {{ docker_options }}{% endif %}\ + {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'" + when: docker_check.stat.isreg is defined and docker_check.stat.isreg + notify: + - restart docker + +- name: Start the Docker service + systemd: + name: docker + enabled: yes + state: started + daemon_reload: yes + register: start_result + +- set_fact: + docker_service_status_changed: start_result | changed + +- meta: flush_handlers diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml new file mode 100644 index 000000000..722232a9b --- /dev/null +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -0,0 +1,143 @@ +--- +# If docker_options are provided we should fail. We should not install docker and ignore +# the users configuration. 
NOTE: docker_options == inventory:openshift_docker_options
+- name: Fail quickly if openshift_docker_options are set
+  assert:
+    that:
+      - docker_options is not defined or docker_options == ""
+    msg: |
+      Docker via System Container does not allow for the use of the openshift_docker_options
+      variable. If you want to use openshift_docker_options you will need to use the
+      traditional docker package install. Otherwise, comment out openshift_docker_options
+      in your inventory file.
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+  package:
+    name: atomic
+    state: present
+  when: not openshift.common.is_atomic | bool
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+  package:
+    name: runc
+    state: present
+  when: not openshift.common.is_atomic | bool
+
+# If we are on atomic, set http_proxy and https_proxy in /etc/atomic.conf
+- block:
+
+  - name: Add http_proxy to /etc/atomic.conf
+    lineinfile:
+      path: /etc/atomic.conf
+      line: "http_proxy={{ openshift.common.http_proxy | default('') }}"
+    when:
+      - openshift.common.http_proxy is defined
+      - openshift.common.http_proxy != ''
+
+  - name: Add https_proxy to /etc/atomic.conf
+    lineinfile:
+      path: /etc/atomic.conf
+      line: "https_proxy={{ openshift.common.https_proxy | default('') }}"
+    when:
+      - openshift.common.https_proxy is defined
+      - openshift.common.https_proxy != ''
+
+  when: openshift.common.is_atomic | bool
+
+
+- block:
+
+  - name: Set to default prepend
+    set_fact:
+      l_docker_image_prepend: "gscrivano"
+
+  - name: Use Red Hat Registry for image when distribution is Red Hat
+    set_fact:
+      l_docker_image_prepend: "registry.access.redhat.com/openshift3"
+    when: ansible_distribution == 'RedHat'
+
+  - name: Use Fedora Registry for image when distribution is Fedora
+    set_fact:
+      l_docker_image_prepend: "registry.fedoraproject.org"
+    when: ansible_distribution == 'Fedora'
+
+  # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
+  - name: Use a testing registry if requested
+    set_fact:
+      l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
+    when:
+      - openshift_docker_systemcontainer_image_registry_override is defined
+      - openshift_docker_systemcontainer_image_registry_override != ""
+
+  - name: Set the full image name
+    set_fact:
+      l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+
+- name: Pre-pull Container Engine System Container image
+  command: "atomic pull --storage ostree {{ l_docker_image }}"
+  changed_when: false
+
+# Make sure docker is disabled. Errors are ignored as docker may not
+# be installed.
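For illustration, a sketch of inventory values that would drive the image selection above; the registry hostname is a placeholder and only the variable names are taken from the tasks in this file:

```yaml
# Hypothetical inventory values; the registry/namespace prefix is a placeholder.
openshift_docker_use_system_container: true
openshift_docker_systemcontainer_image_registry_override: "registry.example.com/engine"
```

When the override is unset, the image prefix falls back to the distribution-specific registries chosen above, and the full image name is always `<prefix>/{{ openshift.docker.service_name }}:latest`.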
+- name: Disable Docker + systemd: + name: docker + enabled: no + state: stopped + daemon_reload: yes + ignore_errors: True + +- name: Ensure docker.service.d directory exists + file: + path: "{{ docker_systemd_dir }}" + state: directory + +- name: Ensure /etc/docker directory exists + file: + path: "{{ docker_conf_dir }}" + state: directory + +- name: Install Container Enginer System Container + oc_atomic_container: + name: "{{ openshift.docker.service_name }}" + image: "{{ l_docker_image }}" + state: latest + values: + - "system-package=no" + +- name: Configure Container Engine Service File + template: + dest: "{{ docker_systemd_dir }}/custom.conf" + src: systemcontainercustom.conf.j2 + +# Set local versions of facts that must be in json format for daemon.json +# NOTE: When jinja2.9+ is used the daemon.json file can move to using tojson +- set_fact: + l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}" + l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}" + l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}" + l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}" + +# Configure container-engine using the daemon.json file +- name: Configure Container Engine + template: + dest: "{{ docker_conf_dir }}/daemon.json" + src: daemon.json + +# Enable and start the container-engine service +- name: Start the Container Engine service + systemd: + name: "{{ openshift.docker.service_name }}" + enabled: yes + state: started + daemon_reload: yes + register: start_result + +- set_fact: + docker_service_status_changed: start_result | changed + +- meta: flush_handlers diff --git a/roles/docker/templates/daemon.json b/roles/docker/templates/daemon.json new file mode 100644 index 000000000..7ea8164b3 --- /dev/null +++ b/roles/docker/templates/daemon.json @@ -0,0 +1,66 @@ + +{ + "api-cors-header": "", + "authorization-plugins": ["rhel-push-plugin"], + "bip": "", + "bridge": "", + "cgroup-parent": "", + "cluster-store": "", + "cluster-store-opts": {}, + "cluster-advertise": "", + "debug": true, + "default-gateway": "", + "default-gateway-v6": "", + "default-runtime": "oci", + "containerd": "/run/containerd.sock", + "default-ulimits": {}, + "disable-legacy-registry": false, + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": ["native.cgroupdriver=systemd"], + "exec-root": "", + "fixed-cidr": "", + "fixed-cidr-v6": "", + "graph": "", + "group": "", + "hosts": [], + "icc": false, + "insecure-registries": {{ l_docker_insecure_registries }}, + "ip": "0.0.0.0", + "iptables": false, + "ipv6": false, + "ip-forward": false, + "ip-masq": false, + "labels": [], + "live-restore": true, +{% if docker_log_driver is defined %} + "log-driver": "{{ docker_log_driver }}", +{%- endif %} + "log-level": "", + "log-opts": {{ l_docker_log_options }}, + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "mtu": 0, + "oom-score-adjust": -500, + "pidfile": "", + "raw-logs": false, + "registry-mirrors": [], + "runtimes": { + "oci": { + "path": "/usr/libexec/docker/docker-runc-current" + } + }, + "selinux-enabled": {{ docker_selinux_enabled|default(true) }}, + "storage-driver": "", + "storage-opts": [], + "tls": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "tlsverify": true, + "userns-remap": "", + "add-registry": {{ l_docker_additional_registries }}, + "blocked-registries": {{ l_docker_blocked_registries }}, + "userland-proxy-path": 
"/usr/libexec/docker/docker-proxy-current" +} diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/docker/templates/systemcontainercustom.conf.j2 new file mode 100644 index 000000000..a4fb01d2b --- /dev/null +++ b/roles/docker/templates/systemcontainercustom.conf.j2 @@ -0,0 +1,17 @@ +# {{ ansible_managed }} + +[Service] +{%- if "http_proxy" in openshift.common %} +ENVIRONMENT=HTTP_PROXY={{ docker_http_proxy }} +{%- endif -%} +{%- if "https_proxy" in openshift.common %} +ENVIRONMENT=HTTPS_PROXY={{ docker_http_proxy }} +{%- endif -%} +{%- if "no_proxy" in openshift.common %} +ENVIRONMENT=NO_PROXY={{ docker_no_proxy }} +{%- endif %} +{%- if os_firewall_use_firewalld|default(true) %} +[Unit] +Wants=iptables.service +After=iptables.service +{%- endif %} diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml index 5237ed8f2..0082ded1e 100644 --- a/roles/docker/vars/main.yml +++ b/roles/docker/vars/main.yml @@ -1,3 +1,4 @@ --- -udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d docker_systemd_dir: /etc/systemd/system/docker.service.d +docker_conf_dir: /etc/docker/ +udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 29153f4df..e45f53219 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -13,5 +13,4 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" -etcd_data_dir: /var/lib/etcd/ etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" diff --git a/roles/etcd/files/etcdctl.sh b/roles/etcd/files/etcdctl.sh deleted file mode 100644 index 0e324a8a9..000000000 --- a/roles/etcd/files/etcdctl.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because -# command flags are different between the two. Should work on stand -# alone etcd hosts and master + etcd hosts too because we use the peer keys. 
-etcdctl2() { - /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@} -} - -etcdctl3() { - ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@} -} diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index e0c70a181..689c07a84 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -24,3 +24,4 @@ dependencies: - service: etcd peering port: "{{ etcd_peer_port }}/tcp" - role: etcd_server_certificates +- role: etcd_common diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index c09da3b61..fa2f44609 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -10,51 +10,45 @@ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not etcd_is_containerized | bool -- name: Pull etcd container - command: docker pull {{ openshift.etcd.etcd_image }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" +- block: + - name: Pull etcd container + command: docker pull {{ openshift.etcd.etcd_image }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + + - name: Install etcd container service file + template: + dest: "/etc/systemd/system/etcd_container.service" + src: etcd.docker.service when: - etcd_is_containerized | bool - not openshift.common.is_etcd_system_container | bool -- name: Install etcd container service file - template: - dest: "/etc/systemd/system/etcd_container.service" - src: etcd.docker.service - when: - - etcd_is_containerized | bool - - not openshift.common.is_etcd_system_container | bool - - # Start secondary etcd instance for third party integrations # TODO: Determine an alternative to using thirdparty variable - -- name: Create configuration directory - file: - path: "{{ etcd_conf_dir }}" - state: directory - mode: 0700 - when: etcd_is_thirdparty | bool +- block: + - name: Create configuration directory + file: + path: "{{ etcd_conf_dir }}" + state: directory + mode: 0700 # TODO: retest with symlink to confirm it does or does not function -- name: Copy service file for etcd instance - copy: - src: /usr/lib/systemd/system/etcd.service - dest: "/etc/systemd/system/{{ etcd_service }}.service" - remote_src: True - when: etcd_is_thirdparty | bool - -- name: Create third party etcd service.d directory exists - file: - path: "{{ etcd_systemd_dir }}" - state: directory - when: etcd_is_thirdparty | bool - -- name: Configure third part etcd service unit file - template: - dest: "{{ etcd_systemd_dir }}/custom.conf" - src: custom.conf.j2 + - name: Copy service file for etcd instance + copy: + src: /usr/lib/systemd/system/etcd.service + dest: "/etc/systemd/system/{{ etcd_service }}.service" + remote_src: True + + - name: Create third party etcd service.d directory exists + file: + path: "{{ etcd_systemd_dir }}" + state: directory + + - name: Configure third part etcd service unit file + template: + dest: "{{ etcd_systemd_dir }}/custom.conf" + src: custom.conf.j2 when: etcd_is_thirdparty # TODO: this task may not be needed with Validate permissions @@ -80,28 +74,28 @@ command: systemctl daemon-reload when: etcd_is_thirdparty | bool -- name: Disable system etcd when containerized - systemd: - name: etcd - state: stopped - enabled: no - masked: yes - daemon_reload: yes - when: - - etcd_is_containerized | bool - - not 
openshift.common.is_etcd_system_container | bool - register: task_result - failed_when: "task_result|failed and 'could not' not in task_result.msg|lower" - -- name: Install etcd container service file - template: - dest: "/etc/systemd/system/etcd_container.service" - src: etcd.docker.service - when: etcd_is_containerized | bool and not openshift.common.is_etcd_system_container | bool - -- name: Install Etcd system container - include: system_container.yml - when: etcd_is_containerized | bool and openshift.common.is_etcd_system_container | bool +- block: + - name: Disable system etcd when containerized + systemd: + name: etcd + state: stopped + enabled: no + masked: yes + daemon_reload: yes + when: not openshift.common.is_etcd_system_container | bool + register: task_result + failed_when: task_result|failed and 'could not' not in task_result.msg|lower + + - name: Install etcd container service file + template: + dest: "/etc/systemd/system/etcd_container.service" + src: etcd.docker.service + when: not openshift.common.is_etcd_system_container | bool + + - name: Install Etcd system container + include: system_container.yml + when: openshift.common.is_etcd_system_container | bool + when: etcd_is_containerized | bool - name: Validate permissions on the config dir file: @@ -126,7 +120,9 @@ enabled: yes register: start_result -- include: etcdctl.yml +- include_role: + name: etcd_common + tasks_from: etcdctl.yml when: openshift_etcd_etcdctl_profile | default(true) | bool - name: Set fact etcd_service_status_changed diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index e4d1b57e6..c8ceaa6ba 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -5,9 +5,9 @@ Requires=docker.service PartOf=docker.service [Service] -EnvironmentFile=/etc/etcd/etcd.conf +EnvironmentFile={{ etcd_conf_file }} ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }} -ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --env-file=/etc/sysconfig/etcd --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} +ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} ExecStop=/usr/bin/docker stop {{ etcd_service }} SyslogIdentifier=etcd_container Restart=always diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md index 131a01490..d1c3a6602 100644 --- a/roles/etcd_common/README.md +++ b/roles/etcd_common/README.md @@ -1,17 +1,21 @@ etcd_common ======================== -TODO +Common resources for dependent etcd roles. E.g. default variables for: +* config directories +* certificates +* ports +* other settings -Requirements ------------- - -TODO +Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g. -Role Variables --------------- +```yaml +- delegated_serial_command: + command: /usr/bin/make_database.sh arg1 arg2 + creates: /path/to/database +``` -TODO +Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example). 
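As a sketch of the shared default variables, any role that declares `etcd_common` as a dependency can reference its defaults directly, e.g. `etcd_data_dir`; the task below is illustrative only:

```yaml
- name: Ensure the etcd data directory exists
  file:
    path: "{{ etcd_data_dir }}"
    state: directory
    mode: 0700
```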
Dependencies ------------ @@ -21,7 +25,22 @@ openshift-repos Example Playbook ---------------- -TODO +**Drop etcdctl aliases** + +```yaml +- include_role: + name: etcd_common + tasks_from: etcdctl +``` + +**Get access to common variables** + +```yaml +# meta.yml of etcd +... +dependencies: +- { role: etcd_common } +``` License ------- diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml index c5efb0a0c..d12e6a07f 100644 --- a/roles/etcd_common/defaults/main.yml +++ b/roles/etcd_common/defaults/main.yml @@ -35,3 +35,6 @@ etcd_ip: "{{ ansible_default_ipv4.address }}" etcd_is_atomic: False etcd_is_containerized: False etcd_is_thirdparty: False + +# etcd dir vars +etcd_data_dir: /var/lib/etcd/ diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd_common/tasks/etcdctl.yml index 649ad23c1..6cb456677 100644 --- a/roles/etcd/tasks/etcdctl.yml +++ b/roles/etcd_common/tasks/etcdctl.yml @@ -4,9 +4,9 @@ when: not openshift.common.is_atomic | bool - name: Configure etcd profile.d alises - copy: - src: etcdctl.sh - dest: /etc/profile.d/etcdctl.sh + template: + dest: "/etc/profile.d/etcdctl.sh" + src: etcdctl.sh.j2 mode: 0755 owner: root group: root diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd_common/templates/etcdctl.sh.j2 new file mode 100644 index 000000000..ac7d9c72f --- /dev/null +++ b/roles/etcd_common/templates/etcdctl.sh.j2 @@ -0,0 +1,12 @@ +#!/bin/bash +# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because +# command flags are different between the two. Should work on stand +# alone etcd hosts and master + etcd hosts too because we use the peer keys. +etcdctl2() { + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://`hostname`:2379 ${@} + +} + +etcdctl3() { + ETCDCTL_API=3 /usr/bin/etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints https://`hostname`:2379 ${@} +} diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml index 98c913dba..b453f2bd8 100644 --- a/roles/etcd_server_certificates/meta/main.yml +++ b/roles/etcd_server_certificates/meta/main.yml @@ -13,4 +13,4 @@ galaxy_info: - cloud - system dependencies: -- role: openshift_etcd_ca +- role: etcd_ca diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 94d1d18fb..c60c2115a 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -5,4 +5,6 @@ - name: restart docker become: yes - systemd: name=docker state=restarted + systemd: + name: "{{ openshift.docker.service_name }}" + state: restarted diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 4d083c4d5..7039a0cec 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -900,6 +900,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1073,7 +1080,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + 
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 48e80a7cd..ae5806137 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -886,6 +886,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1059,7 +1066,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 35168d1a3..36eb294a8 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -872,6 +872,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1045,7 +1052,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 5f7e4b8fa..bedd45922 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -872,6 +872,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1045,7 +1052,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): @@ -1960,7 +1967,7 @@ class PolicyUser(OpenShiftCLI): @property def policybindings(self): if self._policy_bindings is None: - results = self._get('clusterpolicybindings', None) + results = 
self._get('policybindings', None) if results['returncode'] != 0: raise OpenShiftCLIError('Could not retrieve policybindings') self._policy_bindings = results['results'][0]['items'][0] diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index a6718d921..c6fa85f90 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -990,6 +990,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1163,7 +1170,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): @@ -2531,25 +2538,34 @@ class Registry(OpenShiftCLI): def run_ansible(params, check_mode): '''run idempotent ansible code''' + registry_options = {'images': {'value': params['images'], 'include': True}, + 'latest_images': {'value': params['latest_images'], 'include': True}, + 'labels': {'value': params['labels'], 'include': True}, + 'ports': {'value': ','.join(params['ports']), 'include': True}, + 'replicas': {'value': params['replicas'], 'include': True}, + 'selector': {'value': params['selector'], 'include': True}, + 'service_account': {'value': params['service_account'], 'include': True}, + 'mount_host': {'value': params['mount_host'], 'include': True}, + 'env_vars': {'value': params['env_vars'], 'include': False}, + 'volume_mounts': {'value': params['volume_mounts'], 'include': False}, + 'edits': {'value': params['edits'], 'include': False}, + 'tls_key': {'value': params['tls_key'], 'include': True}, + 'tls_certificate': {'value': params['tls_certificate'], 'include': True}, + } + + # Do not always pass the daemonset and enforce-quota parameters because they are not understood + # by old versions of oc. + # Default value is false. So, it's safe to not pass an explicit false value to oc versions which + # understand these parameters. 
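Since `oc_adm_registry` is consumed as an Ansible module, a hedged usage sketch may help; the parameter names mirror the `params` dict handled in `run_ansible` and the values are placeholders. Leaving `daemonset` and `enforce_quota` at their false defaults means the corresponding flags are never passed, so older `oc` clients that do not understand them keep working:

```yaml
- oc_adm_registry:
    state: present
    name: docker-registry
    namespace: default
    selector: type=infra
    replicas: 1
    daemonset: true   # only forwarded to `oc adm registry` when truthy
```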
+ if params['daemonset']: + registry_options['daemonset'] = {'value': params['daemonset'], 'include': True} + if params['enforce_quota']: + registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True} + rconfig = RegistryConfig(params['name'], params['namespace'], params['kubeconfig'], - {'images': {'value': params['images'], 'include': True}, - 'latest_images': {'value': params['latest_images'], 'include': True}, - 'labels': {'value': params['labels'], 'include': True}, - 'ports': {'value': ','.join(params['ports']), 'include': True}, - 'replicas': {'value': params['replicas'], 'include': True}, - 'selector': {'value': params['selector'], 'include': True}, - 'service_account': {'value': params['service_account'], 'include': True}, - 'mount_host': {'value': params['mount_host'], 'include': True}, - 'env_vars': {'value': params['env_vars'], 'include': False}, - 'volume_mounts': {'value': params['volume_mounts'], 'include': False}, - 'edits': {'value': params['edits'], 'include': False}, - 'enforce_quota': {'value': params['enforce_quota'], 'include': True}, - 'daemonset': {'value': params['daemonset'], 'include': True}, - 'tls_key': {'value': params['tls_key'], 'include': True}, - 'tls_certificate': {'value': params['tls_certificate'], 'include': True}, - }) + registry_options) ocregistry = Registry(rconfig, params['debug']) diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 0e4b336fb..8a4f93372 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1015,6 +1015,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1188,7 +1195,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index a34ce351e..d81c29784 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -864,6 +864,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1037,7 +1044,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): @@ -1531,10 +1538,10 @@ class Rule(object): results = [] for rule in inc_rules: - results.append(Rule(rule['apiGroups'], - 
rule['attributeRestrictions'], - rule['resources'], - rule['verbs'])) + results.append(Rule(rule.get('apiGroups', ['']), + rule.get('attributeRestrictions', None), + rule.get('resources', []), + rule.get('verbs', []))) return results @@ -1633,7 +1640,7 @@ class OCClusterRole(OpenShiftCLI): @property def clusterrole(self): ''' property for clusterrole''' - if not self._clusterrole: + if self._clusterrole is None: self.get() return self._clusterrole @@ -1669,6 +1676,7 @@ class OCClusterRole(OpenShiftCLI): elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']: result['returncode'] = 0 + self.clusterrole = None return result @@ -1738,6 +1746,9 @@ class OCClusterRole(OpenShiftCLI): # Create it here api_rval = oc_clusterrole.create() + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + # return the created object api_rval = oc_clusterrole.get() diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 69dd23a0e..bdcb3f278 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -870,6 +870,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1043,7 +1050,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index 70329ccfe..be1b3a01e 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -914,6 +914,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1087,7 +1094,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index bda5eebc5..4ac6e4aeb 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -881,6 +881,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1054,7 +1061,7 @@ 
class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index 462e14868..b6f058340 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -854,6 +854,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1027,7 +1034,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index 8aed060bb..c094c9472 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -873,6 +873,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1046,7 +1053,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 0d18a7afe..a76dd44c4 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -890,6 +890,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1063,7 +1070,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 0b01670c6..bdce28045 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ 
b/roles/lib_openshift/library/oc_obj.py @@ -98,7 +98,7 @@ options: aliases: [] kind: description: - - The kind attribute of the object. e.g. dc, bc, svc, route + - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc". required: True default: None aliases: [] @@ -893,6 +893,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1066,7 +1073,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): @@ -1539,12 +1546,9 @@ class OCObject(OpenShiftCLI): # Delete ######## if state == 'absent': - # if we were passed a name, verify its not in our results - if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']): - return {'changed': False, 'state': state} - - # verify results are empty for the selector - if params['selector'] is not None and len(api_rval['results']) == 0: + # verify its not in our results + if (params['name'] is not None or params['selector'] is not None) and \ + (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0): return {'changed': False, 'state': state} if check_mode: diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index 9b321b47c..aeb4e5686 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -825,6 +825,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -998,7 +1005,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 34f80ce13..f7aa8c0d2 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -882,6 +882,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1055,7 +1062,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, 
stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index 331f31e41..b044a47ce 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -879,6 +879,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1052,7 +1059,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 3e4601cc3..8604cc2f3 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -874,6 +874,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1047,7 +1054,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 755ab3b02..fef48daf0 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -924,6 +924,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1097,7 +1104,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index 0c83338b0..384df0ee3 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -868,6 +868,13 @@ class OpenShiftCLI(object): def _replace(self, 
fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1041,7 +1048,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 26e52a926..443750c5d 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -914,6 +914,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1087,7 +1094,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 440cda1b3..7537bdb5b 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -920,6 +920,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1093,7 +1100,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 5eb36ee32..03a4dd3b9 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -866,6 +866,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1039,7 +1046,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return 
proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 1bc788e87..db1010694 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -866,6 +866,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1039,7 +1046,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index 3009e661a..c3885c1ac 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -926,6 +926,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1099,7 +1106,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 88f295a74..5c4596c09 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -838,6 +838,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1011,7 +1018,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 9508f2157..b89215510 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -915,6 +915,13 @@ class 
OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1088,7 +1095,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py index 88fcc1ddc..37a685ebb 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_user.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py @@ -46,7 +46,7 @@ class PolicyUser(OpenShiftCLI): @property def policybindings(self): if self._policy_bindings is None: - results = self._get('clusterpolicybindings', None) + results = self._get('policybindings', None) if results['returncode'] != 0: raise OpenShiftCLIError('Could not retrieve policybindings') self._policy_bindings = results['results'][0]['items'][0] diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py index 720b44cdc..3c130fe28 100644 --- a/roles/lib_openshift/src/class/oc_adm_registry.py +++ b/roles/lib_openshift/src/class/oc_adm_registry.py @@ -331,25 +331,34 @@ class Registry(OpenShiftCLI): def run_ansible(params, check_mode): '''run idempotent ansible code''' + registry_options = {'images': {'value': params['images'], 'include': True}, + 'latest_images': {'value': params['latest_images'], 'include': True}, + 'labels': {'value': params['labels'], 'include': True}, + 'ports': {'value': ','.join(params['ports']), 'include': True}, + 'replicas': {'value': params['replicas'], 'include': True}, + 'selector': {'value': params['selector'], 'include': True}, + 'service_account': {'value': params['service_account'], 'include': True}, + 'mount_host': {'value': params['mount_host'], 'include': True}, + 'env_vars': {'value': params['env_vars'], 'include': False}, + 'volume_mounts': {'value': params['volume_mounts'], 'include': False}, + 'edits': {'value': params['edits'], 'include': False}, + 'tls_key': {'value': params['tls_key'], 'include': True}, + 'tls_certificate': {'value': params['tls_certificate'], 'include': True}, + } + + # Do not always pass the daemonset and enforce-quota parameters because they are not understood + # by old versions of oc. + # Default value is false. So, it's safe to not pass an explicit false value to oc versions which + # understand these parameters. 
+ if params['daemonset']: + registry_options['daemonset'] = {'value': params['daemonset'], 'include': True} + if params['enforce_quota']: + registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True} + rconfig = RegistryConfig(params['name'], params['namespace'], params['kubeconfig'], - {'images': {'value': params['images'], 'include': True}, - 'latest_images': {'value': params['latest_images'], 'include': True}, - 'labels': {'value': params['labels'], 'include': True}, - 'ports': {'value': ','.join(params['ports']), 'include': True}, - 'replicas': {'value': params['replicas'], 'include': True}, - 'selector': {'value': params['selector'], 'include': True}, - 'service_account': {'value': params['service_account'], 'include': True}, - 'mount_host': {'value': params['mount_host'], 'include': True}, - 'env_vars': {'value': params['env_vars'], 'include': False}, - 'volume_mounts': {'value': params['volume_mounts'], 'include': False}, - 'edits': {'value': params['edits'], 'include': False}, - 'enforce_quota': {'value': params['enforce_quota'], 'include': True}, - 'daemonset': {'value': params['daemonset'], 'include': True}, - 'tls_key': {'value': params['tls_key'], 'include': True}, - 'tls_certificate': {'value': params['tls_certificate'], 'include': True}, - }) + registry_options) ocregistry = Registry(rconfig, params['debug']) diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py index 1d3d977db..ae6795446 100644 --- a/roles/lib_openshift/src/class/oc_clusterrole.py +++ b/roles/lib_openshift/src/class/oc_clusterrole.py @@ -22,7 +22,7 @@ class OCClusterRole(OpenShiftCLI): @property def clusterrole(self): ''' property for clusterrole''' - if not self._clusterrole: + if self._clusterrole is None: self.get() return self._clusterrole @@ -58,6 +58,7 @@ class OCClusterRole(OpenShiftCLI): elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']: result['returncode'] = 0 + self.clusterrole = None return result @@ -127,6 +128,9 @@ class OCClusterRole(OpenShiftCLI): # Create it here api_rval = oc_clusterrole.create() + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + # return the created object api_rval = oc_clusterrole.get() diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py index 667b98eac..6f0da3d5c 100644 --- a/roles/lib_openshift/src/class/oc_obj.py +++ b/roles/lib_openshift/src/class/oc_obj.py @@ -115,12 +115,9 @@ class OCObject(OpenShiftCLI): # Delete ######## if state == 'absent': - # if we were passed a name, verify its not in our results - if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']): - return {'changed': False, 'state': state} - - # verify results are empty for the selector - if params['selector'] is not None and len(api_rval['results']) == 0: + # verify its not in our results + if (params['name'] is not None or params['selector'] is not None) and \ + (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0): return {'changed': False, 'state': state} if check_mode: diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj index e44843eb3..4ff912b2d 100644 --- a/roles/lib_openshift/src/doc/obj +++ b/roles/lib_openshift/src/doc/obj @@ -47,7 +47,7 @@ options: aliases: [] kind: description: - - The kind attribute of the object. e.g. dc, bc, svc, route + - The kind attribute of the object. e.g. dc, bc, svc, route. 
May be a comma-separated list, e.g. "dc,po,svc". required: True default: None aliases: [] diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 1868b1420..2bf795e25 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -76,6 +76,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -249,7 +256,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py index 4590dcf90..fe5ed9723 100644 --- a/roles/lib_openshift/src/lib/rule.py +++ b/roles/lib_openshift/src/lib/rule.py @@ -136,9 +136,9 @@ class Rule(object): results = [] for rule in inc_rules: - results.append(Rule(rule['apiGroups'], - rule['attributeRestrictions'], - rule['resources'], - rule['verbs'])) + results.append(Rule(rule.get('apiGroups', ['']), + rule.get('attributeRestrictions', None), + rule.get('resources', []), + rule.get('verbs', []))) return results diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py index 6990a11a8..f350bd25d 100644 --- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py +++ b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Custom filters for use in testing ''' diff --git a/roles/lib_openshift/src/test/integration/oc_label.yml b/roles/lib_openshift/src/test/integration/oc_label.yml index b4e721407..22cf687c5 100755 --- a/roles/lib_openshift/src/test/integration/oc_label.yml +++ b/roles/lib_openshift/src/test/integration/oc_label.yml @@ -15,7 +15,7 @@ - name: ensure needed vars are defined fail: msg: "{{ item }} not defined" - when: "{{ item }} is not defined" + when: item is not defined with_items: - cli_master_test # ansible inventory instance to run playbook against diff --git a/roles/lib_openshift/src/test/integration/oc_user.yml b/roles/lib_openshift/src/test/integration/oc_user.yml index ad1f9d188..9b4290052 100755 --- a/roles/lib_openshift/src/test/integration/oc_user.yml +++ b/roles/lib_openshift/src/test/integration/oc_user.yml @@ -14,7 +14,7 @@ - name: ensure needed vars are defined fail: msg: "{{ item }} no defined" - when: "{{ item}} is not defined" + when: item is not defined with_items: - cli_master_test # ansible inventory instance to run playbook against diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py index bab36fddc..97cf86170 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py +++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py @@ -205,10 +205,11 @@ class RegistryTest(unittest.TestCase): } ]}''' + @mock.patch('oc_adm_registry.locate_oc_binary') 
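The unit-test hunks here and below add a patch for `locate_oc_binary` alongside the existing `_run`, `_write`, and `create_tmpfile_copy` patches, so the tests never need a real `oc` binary on the host running them. A self-contained sketch of the same pattern with `unittest.mock` (the `OcClient` class and `locate_oc_binary` stub are hypothetical stand-ins, not the repo's generated test modules):

```python
import subprocess
import unittest
from unittest import mock


def locate_oc_binary():
    """Find 'oc' on the host; the real helper searches PATH and known dirs."""
    return '/usr/bin/oc'


class OcClient(object):
    """Tiny stand-in for the generated OpenShiftCLI-style classes."""

    def get(self, kind, namespace='default'):
        cmd = [locate_oc_binary(), 'get', kind, '-o', 'json', '-n', namespace]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')


class OcClientTest(unittest.TestCase):
    @mock.patch(__name__ + '.locate_oc_binary')
    @mock.patch('subprocess.Popen')
    def test_get_pods(self, mock_popen, mock_oc_binary):
        # Decorators apply bottom-up: the Popen patch is the first argument.
        mock_oc_binary.return_value = 'oc'   # skip the real binary lookup
        fake_proc = mock.Mock()
        fake_proc.communicate.return_value = (b'{"items": []}', b'')
        fake_proc.returncode = 0
        mock_popen.return_value = fake_proc

        rc, out, err = OcClient().get('pods')

        self.assertEqual(rc, 0)
        mock_popen.assert_called_once_with(
            ['oc', 'get', 'pods', '-o', 'json', '-n', 'default'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)


if __name__ == '__main__':
    unittest.main()
```

Returning a fixed binary name from the patched helper also keeps the expected command lines in `assert_has_calls` stable regardless of where `oc` is installed on the test host.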
@mock.patch('oc_adm_registry.Utils._write') @mock.patch('oc_adm_registry.Utils.create_tmpfile_copy') @mock.patch('oc_adm_registry.Registry._run') - def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write): + def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary): ''' Testing state present ''' params = {'state': 'present', 'debug': False, @@ -240,10 +241,9 @@ class RegistryTest(unittest.TestCase): (0, '', ''), ] - mock_tmpfile_copy.side_effect = [ - '/tmp/mocked_kubeconfig', - '/tmp/mocked_kubeconfig', - ] + mock_tmpfile_copy.return_value = '/tmp/mocked_kubeconfig' + + mock_oc_binary.return_value = 'oc' results = Registry.run_ansible(params, False) @@ -254,7 +254,7 @@ class RegistryTest(unittest.TestCase): mock_cmd.assert_has_calls([ mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None), mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None), - mock.call(['oc', 'adm', 'registry', '--daemonset=False', '--enforce-quota=False', + mock.call(['oc', 'adm', 'registry', '--ports=5000', '--replicas=1', '--selector=type=infra', '--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None), mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py index 51393dbaf..5481ac623 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py +++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py @@ -286,10 +286,11 @@ class RouterTest(unittest.TestCase): ] }''' + @mock.patch('oc_adm_router.locate_oc_binary') @mock.patch('oc_adm_router.Utils._write') @mock.patch('oc_adm_router.Utils.create_tmpfile_copy') @mock.patch('oc_adm_router.Router._run') - def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write): + def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary): ''' Testing a create ''' params = {'state': 'present', 'debug': False, @@ -345,6 +346,10 @@ class RouterTest(unittest.TestCase): '/tmp/mocked_kubeconfig', ] + mock_oc_binary.side_effect = [ + 'oc', + ] + results = Router.run_ansible(params, False) self.assertTrue(results['changed']) diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py index da326742f..b19a5a880 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py +++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py @@ -25,9 +25,10 @@ class OCObjectValidatorTest(unittest.TestCase): maxDiff = None + @mock.patch('oc_objectvalidator.locate_oc_binary') @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy') @mock.patch('oc_objectvalidator.OCObjectValidator._run') - def test_no_data(self, mock_cmd, mock_tmpfile_copy): + def test_no_data(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary): ''' Testing when both all objects are empty ''' # Arrange @@ -62,6 +63,10 @@ class OCObjectValidatorTest(unittest.TestCase): '/tmp/mocked_kubeconfig', ] + mock_oc_binary.side_effect = [ + 'oc', + ] + # Act results = OCObjectValidator.run_ansible(params) @@ -76,9 +81,10 @@ class OCObjectValidatorTest(unittest.TestCase): mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None), ]) + @mock.patch('oc_objectvalidator.locate_oc_binary') @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy') @mock.patch('oc_objectvalidator.OCObjectValidator._run') - def 
test_error_code(self, mock_cmd, mock_tmpfile_copy): + def test_error_code(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary): ''' Testing when we fail to get objects ''' # Arrange @@ -98,6 +104,10 @@ class OCObjectValidatorTest(unittest.TestCase): '/tmp/mocked_kubeconfig', ] + mock_oc_binary.side_effect = [ + 'oc' + ] + error_results = { 'returncode': 1, 'stderr': 'Error.', @@ -120,9 +130,10 @@ class OCObjectValidatorTest(unittest.TestCase): mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None), ]) + @mock.patch('oc_objectvalidator.locate_oc_binary') @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy') @mock.patch('oc_objectvalidator.OCObjectValidator._run') - def test_valid_both(self, mock_cmd, mock_tmpfile_copy): + def test_valid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary): ''' Testing when both all objects are valid ''' # Arrange @@ -427,6 +438,10 @@ class OCObjectValidatorTest(unittest.TestCase): '/tmp/mocked_kubeconfig', ] + mock_oc_binary.side_effect = [ + 'oc' + ] + # Act results = OCObjectValidator.run_ansible(params) @@ -441,9 +456,10 @@ class OCObjectValidatorTest(unittest.TestCase): mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None), ]) + @mock.patch('oc_objectvalidator.locate_oc_binary') @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy') @mock.patch('oc_objectvalidator.OCObjectValidator._run') - def test_invalid_both(self, mock_cmd, mock_tmpfile_copy): + def test_invalid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary): ''' Testing when all objects are invalid ''' # Arrange @@ -886,6 +902,10 @@ class OCObjectValidatorTest(unittest.TestCase): '/tmp/mocked_kubeconfig', ] + mock_oc_binary.side_effect = [ + 'oc' + ] + # Act results = OCObjectValidator.run_ansible(params) diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index 3b17d9ed6..c7b906949 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -95,7 +95,7 @@ {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} --certificate-authority {{ legacy_ca_certificate }} {% endfor %} - --hostnames={{ openshift.common.all_hostnames | join(',') }} + --hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }} --master={{ openshift.master.api_url }} --public-master={{ openshift.master.public_api_url }} --cert-dir={{ openshift_ca_config_dir }} diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py index 5f102e960..a2bc9ecdb 100644 --- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py +++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 """ Custom filters for use in openshift-ansible """ @@ -35,7 +34,7 @@ Example playbook usage: become: no run_once: yes delegate_to: localhost - when: "{{ openshift_certificate_expiry_save_json_results|bool }}" + when: openshift_certificate_expiry_save_json_results|bool copy: content: "{{ hostvars|oo_cert_expiry_results_to_json() }}" dest: "{{ openshift_certificate_expiry_json_results_path }}" diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py index c204b5341..0242f5b43 100644 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ 
b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py @@ -135,7 +135,7 @@ platforms missing the Python OpenSSL library. continue elif l.startswith('Subject:'): - # O=system:nodes, CN=system:node:m01.example.com + # O = system:nodes, CN = system:node:m01.example.com self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1]) def get_serial_number(self): @@ -202,7 +202,7 @@ object""" """ self.subjects = [] for s in subject_string.split(', '): - name, _, value = s.partition('=') + name, _, value = s.partition(' = ') self.subjects.append((name, value)) def get_components(self): diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml index 139d5de6e..b5234bd1e 100644 --- a/roles/openshift_certificate_expiry/tasks/main.yml +++ b/roles/openshift_certificate_expiry/tasks/main.yml @@ -13,12 +13,12 @@ src: cert-expiry-table.html.j2 dest: "{{ openshift_certificate_expiry_html_report_path }}" delegate_to: localhost - when: "{{ openshift_certificate_expiry_generate_html_report|bool }}" + when: openshift_certificate_expiry_generate_html_report|bool - name: Generate the result JSON string run_once: yes set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" - when: "{{ openshift_certificate_expiry_save_json_results|bool }}" + when: openshift_certificate_expiry_save_json_results|bool - name: Generate results JSON file become: no @@ -27,4 +27,4 @@ src: save_json_results.j2 dest: "{{ openshift_certificate_expiry_json_results_path }}" delegate_to: localhost - when: "{{ openshift_certificate_expiry_save_json_results|bool }}" + when: openshift_certificate_expiry_save_json_results|bool diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py index ccdd48fa8..8a521a765 100644 --- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py +++ b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py @@ -17,7 +17,8 @@ from openshift_cert_expiry import FakeOpenSSLCertificate # noqa: E402 @pytest.fixture(scope='module') def fake_valid_cert(valid_cert): - cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text'] + cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text', + '-nameopt', 'oneline'] cert = subprocess.check_output(cmd) return FakeOpenSSLCertificate(cert.decode('utf8')) diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py index 4ed3e1f01..57ac16602 100644 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ b/roles/openshift_cli/library/openshift_container_binary_sync.py @@ -1,8 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 # pylint: disable=missing-docstring,invalid-name -# import random import tempfile diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml index f22dd4520..5788e6d74 100644 --- a/roles/openshift_cloud_provider/tasks/openstack.yml +++ b/roles/openshift_cloud_provider/tasks/openstack.yml @@ -7,4 +7,4 @@ template: dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf" src: openstack.conf.j2 - when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and 
(openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)" + when: openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined) diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index 049ceffe0..350512452 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -16,6 +16,7 @@ disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}" hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}" hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}" + use_system_container: "{{ openshift_docker_use_system_container | default(False) }}" - set_fact: docker_additional_registries: "{{ openshift.docker.additional_registries diff --git a/roles/openshift_etcd_ca/tasks/main.yml b/roles/openshift_etcd_ca/tasks/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_etcd_ca/tasks/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index 0f2bec6d3..c7e51bbfc 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -6,7 +6,7 @@ # This script should be run from openshift-ansible/roles/openshift_examples XPAAS_VERSION=ose-v1.3.6 -ORIGIN_VERSION=${1:-v1.6} +ORIGIN_VERSION=${1:-v3.6} RHAMP_TAG=1.0.0.GA RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION} diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest index 536385712..08751d131 120000 --- a/roles/openshift_examples/files/examples/latest +++ b/roles/openshift_examples/files/examples/latest @@ -1 +1 @@ -v1.6
\ No newline at end of file +v3.6
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json index 1a90a9409..a81dbb654 100644 --- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json @@ -800,7 +800,7 @@ "openshift.io/display-name": "Jenkins 1.X", "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", - "tags": "jenkins", + "tags": "hidden,jenkins", "version": "1.x" }, "from": { diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json index eb94c3bb4..2ed0efe1e 100644 --- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json @@ -707,7 +707,7 @@ "openshift.io/display-name": "Jenkins 1.X", "description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", - "tags": "jenkins", + "tags": "hidden,jenkins", "version": "1.x" }, "from": { diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json index fa31f7f61..a2b59c2d3 100644 --- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json @@ -19,6 +19,17 @@ }, "objects": [ { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}" + }, + "stringData": { + "database-password": "${DATABASE_PASSWORD}", + "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" + } + }, + { "kind": "Service", "apiVersion": "v1", "metadata": { @@ -209,7 +220,12 @@ "env": [ { "name": "ConnectionString", - "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef": { + "name": "${NAME}", + "key": "connect-string" + } + } } ], "resources": { @@ -373,7 +389,12 @@ }, { "name": "POSTGRESQL_PASSWORD", - "value": "${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef": { + "name": "${NAME}", + "key": "database-password" + } + } }, { "name": "POSTGRESQL_DATABASE", diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml index 14bdd1dca..14bdd1dca 100644 --- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml index 709d8d976..709d8d976 
100644 --- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml index 4f25a9c8f..4f25a9c8f 100644 --- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md index a36d7ba7d..a36d7ba7d 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json index f347f1f9f..f347f1f9f 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json index 6ed744777..6ed744777 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json index 97a8abf6d..97a8abf6d 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json index 0656219fb..0656219fb 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json index d60b4647d..d60b4647d 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json index c2bfa40fd..c2bfa40fd 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json 
b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json index 7a16e742a..7a16e742a 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json index 242212d6f..242212d6f 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json index e9af50937..e9af50937 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json index aa27578a9..aa27578a9 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json index 857ffa980..857ffa980 100644 --- a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json index 1a90a9409..a81dbb654 100644 --- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json @@ -800,7 +800,7 @@ "openshift.io/display-name": "Jenkins 1.X", "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", - "tags": "jenkins", + "tags": "hidden,jenkins", "version": "1.x" }, "from": { diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json index eb94c3bb4..2ed0efe1e 100644 --- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json @@ -707,7 +707,7 @@ "openshift.io/display-name": "Jenkins 1.X", "description": "Provides a Jenkins 1.X server on RHEL 7. 
For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", - "tags": "jenkins", + "tags": "hidden,jenkins", "version": "1.x" }, "from": { diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md index f48d8d4a8..f48d8d4a8 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml index 34f5fcbcc..34f5fcbcc 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json index eb3d296be..eb3d296be 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json index da2454d2e..da2454d2e 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json index 81ae63416..81ae63416 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json index 7a285dba8..7a285dba8 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json index 9f982c286..9f982c286 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json index 7bee85ddd..7bee85ddd 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json +++ 
b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json index a09d71a00..a09d71a00 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json index fa31f7f61..a2b59c2d3 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json @@ -19,6 +19,17 @@ }, "objects": [ { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}" + }, + "stringData": { + "database-password": "${DATABASE_PASSWORD}", + "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" + } + }, + { "kind": "Service", "apiVersion": "v1", "metadata": { @@ -209,7 +220,12 @@ "env": [ { "name": "ConnectionString", - "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef": { + "name": "${NAME}", + "key": "connect-string" + } + } } ], "resources": { @@ -373,7 +389,12 @@ }, { "name": "POSTGRESQL_PASSWORD", - "value": "${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef": { + "name": "${NAME}", + "key": "database-password" + } + } }, { "name": "POSTGRESQL_DATABASE", diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json index 264e4b2de..264e4b2de 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json index b47bdf353..b47bdf353 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json index 6ee999cb1..6ee999cb1 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json index 5c177a7e0..5c177a7e0 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json +++ 
b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json index b400cfdb3..b400cfdb3 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json index fa67412ff..fa67412ff 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json index 9d99973be..9d99973be 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json index 049f3f884..049f3f884 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json index ab35afead..ab35afead 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json index c12f06dec..c12f06dec 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json index 897ce0395..897ce0395 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json index 97d110286..97d110286 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json index 56e76016f..56e76016f 100644 --- 
a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json index 639ac2e11..639ac2e11 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json index 22ca3f0a0..22ca3f0a0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json index e1a585d24..e1a585d24 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json index 12720eb19..12720eb19 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json index da8015fb0..da8015fb0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json index 7d64dac98..7d64dac98 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json index 1e7c03b99..1e7c03b99 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json index 07f926ff3..07f926ff3 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json diff 
--git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json index 754a3b4c0..754a3b4c0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json index 8be4ac90b..8be4ac90b 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json index bf9047599..bf9047599 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json index 51e667e02..51e667e02 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json index c5f0d006a..c5f0d006a 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json index 3db0e4c84..3db0e4c84 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json index 72dbb4302..72dbb4302 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json index 9dd847451..9dd847451 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json 
b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json index 7b1800b7b..7b1800b7b 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json index 31716d84c..31716d84c 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json index 212431056..212431056 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json index 13fbbdd93..13fbbdd93 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json index 69fdec206..69fdec206 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json index 2bd3c249f..2bd3c249f 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json index 31f245950..31f245950 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json index eac964697..eac964697 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json index 09023be71..09023be71 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json +++ 
b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json index f08cdf2f9..f08cdf2f9 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json index 3ca9e9fab..3ca9e9fab 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json index 83b4d5b24..83b4d5b24 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json index 1292442a4..1292442a4 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json index 99db77d58..99db77d58 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json index c8150c231..c8150c231 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json index f8e5c2b04..f8e5c2b04 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json index 1edeb62e7..1edeb62e7 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json index 
d11df06ee..d11df06ee 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json index 6b7f6d707..6b7f6d707 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json index 811602220..811602220 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json index 413a6de87..413a6de87 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json index 610ea9441..610ea9441 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json index 6ef9d6e4c..6ef9d6e4c 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json index 9b48f8ae7..9b48f8ae7 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json index 30af703ce..30af703ce 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json index c2843af63..c2843af63 100644 --- 
a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json index b8372f374..b8372f374 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json index cd5bb9fa4..cd5bb9fa4 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json index cb1e49d29..cb1e49d29 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json index 21d5662c7..21d5662c7 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json index 34657d826..34657d826 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json index 974cfaddb..974cfaddb 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json index 7a8231cc5..7a8231cc5 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json index cda21f237..cda21f237 100644 
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json index 4dfc98015..4dfc98015 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json index f6c85668c..f6c85668c 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json index cd0bec3c1..cd0bec3c1 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json index 2ecce08a9..2ecce08a9 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json index d80939efb..d80939efb 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json index f99099868..f99099868 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json index 143e16756..143e16756 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json index 1dea463ac..1dea463ac 100644 --- 
a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json index 42264585b..42264585b 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json index f6d0c99ed..f6d0c99ed 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json index 41c726cf0..41c726cf0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json index 170c919cb..170c919cb 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json index 89d0db1a6..89d0db1a6 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json index 26cab29f8..26cab29f8 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json index 32a512829..32a512829 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json diff --git 
a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json index 55e2199bb..55e2199bb 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json index 8b3cd6ed0..8b3cd6ed0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json index bc5bbad22..bc5bbad22 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json index e54fa0d59..e54fa0d59 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json index 20ba97dac..20ba97dac 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json index 555647fab..555647fab 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json index cf9a4e903..cf9a4e903 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json index c78a96f7c..c78a96f7c 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json +++ 
b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json index 620425902..620425902 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json index 15cfc93fd..15cfc93fd 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json index c70ee7726..c70ee7726 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json index fb0578a67..fb0578a67 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json index dcbb24bf1..dcbb24bf1 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json index 1768f7a1b..1768f7a1b 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json index 4c2f81f2e..4c2f81f2e 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json index d8402ef72..d8402ef72 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml 
b/roles/openshift_excluder/tasks/verify_excluder.yml
index 6de1ed061..24a05d56e 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -11,7 +11,7 @@
     failed_when: false
     changed_when: false

-  - name: Docker excluder version detected
+  - name: "{{ excluder }} version detected"
     debug:
       msg: "{{ excluder }}: {{ excluder_version.stdout }}"
diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml
new file mode 100644
index 000000000..6ea2130ac
--- /dev/null
+++ b/roles/openshift_excluder/tasks/verify_upgrade.yml
@@ -0,0 +1,15 @@
+---
+# input variables
+# - repoquery_cmd
+# - openshift_upgrade_target
+- include: init.yml
+
+- include: verify_excluder.yml
+  vars:
+    excluder: "{{ openshift.common.service_type }}-docker-excluder"
+  when: docker_excluder_on
+
+- include: verify_excluder.yml
+  vars:
+    excluder: "{{ openshift.common.service_type }}-excluder"
+  when: openshift_excluder_on
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 00603f4fa..4cb5418c6 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -6,7 +6,7 @@
 - name: Determine if growpart is installed
   command: "rpm -q cloud-utils-growpart"
   register: has_growpart
-  failed_when: "has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout"
+  failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
   changed_when: false
   when: openshift.common.is_containerized | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index adeb85c3f..914e46c05 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,7 +1,6 @@
 #!/usr/bin/python
 # pylint: disable=too-many-lines
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 # Reason: Disable pylint too-many-lines because we don't want to split up this file.
 # Status: Permanently disabled to keep this module as self-contained as possible.
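Editor's note on the `verify_upgrade.yml` addition above: it reuses the now-parametrized `verify_excluder.yml` once per excluder package. The sketch below is illustrative only; it assumes `openshift.common.service_type` resolves to `origin` (on OCP it would be `atomic-openshift`) and shows how the single `excluder` variable feeds both the package name and the renamed debug task.

```yaml
# Illustrative expansion of verify_upgrade.yml when service_type == 'origin' (assumed)
- include: init.yml

- include: verify_excluder.yml
  vars:
    excluder: origin-docker-excluder   # "{{ openshift.common.service_type }}-docker-excluder"
  when: docker_excluder_on

- include: verify_excluder.yml
  vars:
    excluder: origin-excluder          # "{{ openshift.common.service_type }}-excluder"
  when: openshift_excluder_on

# Inside verify_excluder.yml the same variable now names the task, so the output
# reads "origin-docker-excluder version detected" and "origin-excluder version
# detected" instead of the old hard-coded "Docker excluder version detected".
```

Parametrizing the task file this way lets upgrade plays verify both excluders with one shared include instead of duplicating near-identical tasks.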
@@ -911,7 +910,7 @@ def set_version_facts_if_unset(facts): version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0') version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0') version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0') - version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') or version >= LooseVersion('1.6.0') + version_gte_3_6 = version >= LooseVersion('3.6.0') else: version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905') version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1') @@ -919,25 +918,26 @@ def set_version_facts_if_unset(facts): version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0') version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0') version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0') - version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') + version_gte_3_6 = version >= LooseVersion('3.6.0') else: + # 'Latest' version is set to True, 'Next' versions set to False version_gte_3_1_or_1_1 = True version_gte_3_1_1_or_1_1_1 = True version_gte_3_2_or_1_2 = True version_gte_3_3_or_1_3 = True version_gte_3_4_or_1_4 = True version_gte_3_5_or_1_5 = True - version_gte_3_6_or_1_6 = False + version_gte_3_6 = True facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1 facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1 facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2 facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3 facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4 facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5 - facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6 + facts['common']['version_gte_3_6'] = version_gte_3_6 - if version_gte_3_6_or_1_6: - examples_content_version = 'v1.6' + if version_gte_3_6: + examples_content_version = 'v3.6' elif version_gte_3_5_or_1_5: examples_content_version = 'v1.5' elif version_gte_3_4_or_1_4: @@ -1791,6 +1791,12 @@ def set_container_facts_if_unset(facts): deployer_image = 'openshift/origin-deployer' facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') + # If openshift_docker_use_system_container is set and is True .... + if 'use_system_container' in list(facts['docker'].keys()): + if facts['docker']['use_system_container']: + # ... set the service name to container-engine + facts['docker']['service_name'] = 'container-engine' + if 'is_containerized' not in facts['common']: facts['common']['is_containerized'] = facts['common']['is_atomic'] if 'cli_image' not in facts['common']: @@ -1910,14 +1916,16 @@ class OpenShiftFacts(object): ) self.role = role + # Collect system facts and preface each fact with 'ansible_'. 
try: - # ansible-2.1 # pylint: disable=too-many-function-args,invalid-name self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405 + additional_facts = {} for (k, v) in self.system_facts.items(): - self.system_facts["ansible_%s" % k.replace('-', '_')] = v + additional_facts["ansible_%s" % k.replace('-', '_')] = v + self.system_facts.update(additional_facts) except UnboundLocalError: - # ansible-2.2 + # ansible-2.2,2.3 self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405 self.facts = self.generate_facts(local_facts, @@ -2071,6 +2079,7 @@ class OpenShiftFacts(object): hosted_registry_insecure = get_hosted_registry_insecure() if hosted_registry_insecure is not None: docker['hosted_registry_insecure'] = hosted_registry_insecure + docker['service_name'] = 'docker' defaults['docker'] = docker if 'clock' in roles: @@ -2158,7 +2167,9 @@ class OpenShiftFacts(object): glusterfs=dict( endpoints='glusterfs-registry-endpoints', path='glusterfs-registry-volume', - readOnly=False), + readOnly=False, + swap=False, + swapcopy=True), host=None, access=dict( modes=['ReadWriteMany'] diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py index 208e81048..7bce7f107 100644 --- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py +++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py @@ -1,4 +1,3 @@ -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Ansible callback plugin. ''' diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py index a46589443..4460ec324 100755 --- a/roles/openshift_health_checker/library/aos_version.py +++ b/roles/openshift_health_checker/library/aos_version.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Ansible module for yum-based systems determining if multiple releases of an OpenShift package are available, and if the release requested diff --git a/roles/openshift_health_checker/library/check_yum_update.py b/roles/openshift_health_checker/library/check_yum_update.py index 630ebc848..433795b67 100755 --- a/roles/openshift_health_checker/library/check_yum_update.py +++ b/roles/openshift_health_checker/library/check_yum_update.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Ansible module to test whether a yum update or install will succeed, without actually performing it or running yum. diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md index 6d576df71..3e5d7f860 100644 --- a/roles/openshift_hosted/README.md +++ b/roles/openshift_hosted/README.md @@ -28,6 +28,14 @@ From this role: | openshift_hosted_registry_selector | region=infra | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. | | openshift_hosted_registry_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. 
| +If you specify `openshift_hosted_registry_kind=glusterfs`, the following +variables also control configuration behavior: + +| Name | Default value | Description | +|----------------------------------------------|---------------|------------------------------------------------------------------------------| +| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume | +| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, also copy the current contents of the registry volume | + Dependencies ------------ diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 596b36239..e7e62e5e4 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -24,9 +24,9 @@ openshift_hosted_routers: ports: - 80:80 - 443:443 - certificates: "{{ openshift_hosted_router_certificates | default({}) }}" + certificate: "{{ openshift_hosted_router_certificate | default({}) }}" -openshift_hosted_router_certificates: {} +openshift_hosted_router_certificate: {} openshift_hosted_registry_cert_expire_days: 730 openshift_hosted_router_create_certificate: False diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 6e691c26f..751489958 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -61,7 +61,7 @@ name: "{{ openshift_hosted_registry_serviceaccount }}" namespace: "{{ openshift_hosted_registry_namespace }}" -- name: Grant the registry serivce account access to the appropriate scc +- name: Grant the registry service account access to the appropriate scc oc_adm_policy_user: user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}" namespace: "{{ openshift_hosted_registry_namespace }}" @@ -126,4 +126,4 @@ - include: storage/glusterfs.yml when: - - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' + - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml index b18b24266..e6bb196b8 100644 --- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml @@ -1,10 +1,18 @@ --- +- name: Get registry DeploymentConfig + oc_obj: + namespace: "{{ openshift_hosted_registry_namespace }}" + state: list + kind: dc + name: "{{ openshift_hosted_registry_name }}" + register: registry_dc + - name: Wait for registry pods oc_obj: namespace: "{{ openshift_hosted_registry_namespace }}" state: list kind: pod - selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}" + selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}" register: registry_pods until: - "registry_pods.results.results[0]['items'] | count > 0" @@ -38,6 +46,39 @@ mode: "2775" recurse: True +- block: + - name: Activate registry maintenance mode + oc_env: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + env_vars: + - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' + + - name: Get first registry pod name + set_fact: + 
registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}" + + - name: Copy current registry contents to new GlusterFS volume + command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/" + when: openshift.hosted.registry.storage.glusterfs.swapcopy + + - name: Swap new GlusterFS registry volume + oc_volume: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + vol_name: registry-storage + mount_type: pvc + claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + + - name: Deactivate registry maintenance mode + oc_env: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + state: absent + env_vars: + - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' + when: openshift.hosted.registry.storage.glusterfs.swap + - name: Unmount registry volume mount: state: unmounted diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index c71d0a34f..e75e3b16f 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -25,13 +25,13 @@ hostnames: - "{{ openshift_master_default_subdomain }}" - "*.{{ openshift_master_default_subdomain }}" - cert: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}" - key: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.key') }}" + cert: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}" + key: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.key') }}" with_items: "{{ openshift_hosted_routers }}" - - name: set the openshift_hosted_router_certificates + - name: set the openshift_hosted_router_certificate set_fact: - openshift_hosted_router_certificates: + openshift_hosted_router_certificate: certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}" @@ -44,7 +44,7 @@ backup: True dest: "/etc/origin/master/{{ item | basename }}" src: "{{ item }}" - with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') | + with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" when: not openshift_hosted_router_create_certificate @@ -82,9 +82,9 @@ service_account: "{{ item.serviceaccount | default('router') }}" selector: "{{ item.selector | default(none) }}" images: "{{ item.images | default(omit) }}" - cert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else omit }}" - key_file: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else omit }}" - cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.cafile | basename)) if 'cafile' in item.certificates else omit }}" + cert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else omit }}" + 
key_file: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else omit }}" + cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.cafile | basename)) if 'cafile' in item.certificate else omit }}" edits: "{{ openshift_hosted_router_edits | union(item.edits) }}" ports: "{{ item.ports }}" stats_port: "{{ item.stats_port }}" diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml index afd82766f..78b624109 100644 --- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml +++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml @@ -36,7 +36,7 @@ command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }} register: secret_output - failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr" + failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr - name: "Create templates for logging accounts and the deployer" command: > @@ -60,21 +60,21 @@ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer register: permiss_output - failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr" + failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr - name: "Set permissions for fluentd" command: > {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd_output - failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" + failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr - name: "Set additional permissions for fluentd" command: > {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd2_output - failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" + failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr - name: "Add rolebinding-reader to aggregated-logging-elasticsearch" command: > @@ -82,13 +82,13 @@ policy add-cluster-role-to-user rolebinding-reader \ system:serviceaccount:logging:aggregated-logging-elasticsearch register: rolebinding_reader_output - failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr" + failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr - name: "Create ConfigMap for deployer parameters" command: > {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }} register: deployer_configmap_output - failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr" + failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr - name: "Process the deployer template" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}" diff --git a/roles/openshift_hosted_metrics/tasks/install.yml 
b/roles/openshift_hosted_metrics/tasks/install.yml index 6a442cefc..15dd1bd54 100644 --- a/roles/openshift_hosted_metrics/tasks/install.yml +++ b/roles/openshift_hosted_metrics/tasks/install.yml @@ -81,7 +81,7 @@ secrets new metrics-deployer nothing=/dev/null register: metrics_deployer_secret changed_when: metrics_deployer_secret.rc == 0 - failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr" + failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr # TODO: extend this to allow user passed in certs or generating cert with # OpenShift CA diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml index c67058696..5abb2ef83 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml @@ -223,7 +223,7 @@ items: - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"' name: IMAGE_VERSION - value: "3.4.0" + value: "v3.4" - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." name: IMAGE_PULL_SECRET diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml index 6ead122c5..1d319eab8 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml @@ -105,7 +105,7 @@ parameters: - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.4.0" + value: "v3.4" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL @@ -118,7 +118,7 @@ parameters: description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" name: MODE value: "deploy" -- +- description: "Set to true to continue even if the deployer runs into an error." name: CONTINUE_ON_ERROR value: "false" diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml deleted file mode 100644 index fdfc285ca..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml +++ /dev/null @@ -1,345 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-account-template - annotations: - description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
- tags: "infrastructure" - objects: - - - apiVersion: v1 - kind: ServiceAccount - name: logging-deployer - metadata: - name: logging-deployer - labels: - logging-infra: deployer - provider: openshift - component: deployer - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - - apiVersion: v1 - kind: ClusterRole - metadata: - name: oauth-editor - rules: - - resources: - - oauthclients - verbs: - - create - - delete - - apiVersion: v1 - kind: ClusterRole - metadata: - name: daemonset-admin - rules: - - resources: - - daemonsets - apiGroups: - - extensions - verbs: - - create - - get - - list - - watch - - delete - - update - - apiVersion: v1 - kind: ClusterRole - metadata: - name: rolebinding-reader - rules: - - resources: - - clusterrolebindings - verbs: - - get - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-edit-role - roleRef: - kind: ClusterRole - name: edit - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-dsadmin-role - roleRef: - kind: ClusterRole - name: daemonset-admin - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-elasticsearch-view-role - roleRef: - kind: ClusterRole - name: view - subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-template - annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
- tags: "infrastructure" - labels: - logging-infra: deployer - provider: openshift - objects: - - - apiVersion: v1 - kind: Pod - metadata: - generateName: logging-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION} - imagePullPolicy: Always - name: deployer - volumeMounts: - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: IMAGE_PULL_SECRET - value: ${IMAGE_PULL_SECRET} - - name: INSECURE_REGISTRY - value: ${INSECURE_REGISTRY} - - name: ENABLE_OPS_CLUSTER - value: ${ENABLE_OPS_CLUSTER} - - name: KIBANA_HOSTNAME - value: ${KIBANA_HOSTNAME} - - name: KIBANA_OPS_HOSTNAME - value: ${KIBANA_OPS_HOSTNAME} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: ES_INSTANCE_RAM - value: ${ES_INSTANCE_RAM} - - name: ES_PVC_SIZE - value: ${ES_PVC_SIZE} - - name: ES_PVC_PREFIX - value: ${ES_PVC_PREFIX} - - name: ES_PVC_DYNAMIC - value: ${ES_PVC_DYNAMIC} - - name: ES_CLUSTER_SIZE - value: ${ES_CLUSTER_SIZE} - - name: ES_NODE_QUORUM - value: ${ES_NODE_QUORUM} - - name: ES_RECOVER_AFTER_NODES - value: ${ES_RECOVER_AFTER_NODES} - - name: ES_RECOVER_EXPECTED_NODES - value: ${ES_RECOVER_EXPECTED_NODES} - - name: ES_RECOVER_AFTER_TIME - value: ${ES_RECOVER_AFTER_TIME} - - name: ES_OPS_INSTANCE_RAM - value: ${ES_OPS_INSTANCE_RAM} - - name: ES_OPS_PVC_SIZE - value: ${ES_OPS_PVC_SIZE} - - name: ES_OPS_PVC_PREFIX - value: ${ES_OPS_PVC_PREFIX} - - name: ES_OPS_PVC_DYNAMIC - value: ${ES_OPS_PVC_DYNAMIC} - - name: ES_OPS_CLUSTER_SIZE - value: ${ES_OPS_CLUSTER_SIZE} - - name: ES_OPS_NODE_QUORUM - value: ${ES_OPS_NODE_QUORUM} - - name: ES_OPS_RECOVER_AFTER_NODES - value: ${ES_OPS_RECOVER_AFTER_NODES} - - name: ES_OPS_RECOVER_EXPECTED_NODES - value: ${ES_OPS_RECOVER_EXPECTED_NODES} - - name: ES_OPS_RECOVER_AFTER_TIME - value: ${ES_OPS_RECOVER_AFTER_TIME} - - name: FLUENTD_NODESELECTOR - value: ${FLUENTD_NODESELECTOR} - - name: ES_NODESELECTOR - value: ${ES_NODESELECTOR} - - name: ES_OPS_NODESELECTOR - value: ${ES_OPS_NODESELECTOR} - - name: KIBANA_NODESELECTOR - value: ${KIBANA_NODESELECTOR} - - name: KIBANA_OPS_NODESELECTOR - value: ${KIBANA_OPS_NODESELECTOR} - - name: CURATOR_NODESELECTOR - value: ${CURATOR_NODESELECTOR} - - name: CURATOR_OPS_NODESELECTOR - value: ${CURATOR_OPS_NODESELECTOR} - - name: MODE - value: ${MODE} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: logging-deployer - volumes: - - name: empty - emptyDir: {} - parameters: - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' - name: IMAGE_PREFIX - value: "registry.access.redhat.com/openshift3/" - - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' - name: IMAGE_VERSION - value: "3.4.0" - - - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
- name: IMAGE_PULL_SECRET - - - description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" - name: INSECURE_REGISTRY - value: "false" - - - description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." - name: ENABLE_OPS_CLUSTER - value: "false" - - - description: "(Deprecated) External hostname where clients will reach kibana" - name: KIBANA_HOSTNAME - value: "kibana.example.com" - - - description: "(Deprecated) External hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com - - - description: "(Deprecated) External URL for the master, for OAuth purposes" - name: PUBLIC_MASTER_URL - value: "https://localhost:8443" - - - description: "(Deprecated) Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc.cluster.local" - - - description: "(Deprecated) How many instances of ElasticSearch to deploy." - name: ES_CLUSTER_SIZE - value: "1" - - - description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." - name: ES_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." - name: ES_PVC_PREFIX - value: "logging-es-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' - name: ES_PVC_DYNAMIC - - - description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_NODE_QUORUM - - - description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." - name: ES_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." - name: ES_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." - name: ES_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." - name: ES_OPS_CLUSTER_SIZE - - - description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." - name: ES_OPS_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_OPS_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." - name: ES_OPS_PVC_PREFIX - value: "logging-es-ops-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' - name: ES_OPS_PVC_DYNAMIC - - - description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_OPS_NODE_QUORUM - - - description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." - name: ES_OPS_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." - name: FLUENTD_NODESELECTOR - value: "logging-infra-fluentd=true" - - - description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." - name: ES_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." - name: ES_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana cluster (label=value)." - name: KIBANA_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana operations cluster (label=value)." - name: KIBANA_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Curator (label=value)." - name: CURATOR_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector operations Curator (label=value)." - name: CURATOR_OPS_NODESELECTOR - value: "" diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml deleted file mode 100644 index c4ab794ae..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
- tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - securityContext: {} - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - securityContext: {} - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: MASTER_URL - value: ${MASTER_URL} - - name: MODE - value: ${MODE} - - name: CONTINUE_ON_ERROR - value: ${CONTINUE_ON_ERROR} - - name: REDEPLOY - value: ${REDEPLOY} - - name: IGNORE_PREFLIGHT - value: ${IGNORE_PREFLIGHT} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: DYNAMICALLY_PROVISION_STORAGE - value: ${DYNAMICALLY_PROVISION_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - - name: USER_WRITE_ACCESS - value: ${USER_WRITE_ACCESS} - - name: HEAPSTER_NODE_ID - value: ${HEAPSTER_NODE_ID} - - name: METRIC_RESOLUTION - value: ${METRIC_RESOLUTION} - - name: STARTUP_TIMEOUT - value: ${STARTUP_TIMEOUT} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "registry.access.redhat.com/openshift3/" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' - name: IMAGE_VERSION - value: "v3.5" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" - name: MODE - value: "deploy" -- - description: "Set to true to continue even if the deployer runs into an error." - name: CONTINUE_ON_ERROR - value: "false" -- - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" - name: REDEPLOY - value: "false" -- - description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." 
- name: IGNORE_PREFLIGHT - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" - name: DYNAMICALLY_PROVISION_STORAGE - value: "false" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "10Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" -- - description: "If a user accounts should be allowed to write metrics." - name: USER_WRITE_ACCESS - value: "false" -- - description: "The identifier used when generating metric ids in Hawkular" - name: HEAPSTER_NODE_ID - value: "nodename" -- - description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" - name: METRIC_RESOLUTION - value: "30s" -- - description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" - name: STARTUP_TIMEOUT - value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml deleted file mode 100644 index 5b5503500..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml +++ /dev/null @@ -1,342 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-account-template - annotations: - description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
- tags: "infrastructure" - objects: - - - apiVersion: v1 - kind: ServiceAccount - name: logging-deployer - metadata: - name: logging-deployer - labels: - logging-infra: deployer - provider: openshift - component: deployer - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - - apiVersion: v1 - kind: ClusterRole - metadata: - name: oauth-editor - rules: - - resources: - - oauthclients - verbs: - - create - - delete - - apiVersion: v1 - kind: ClusterRole - metadata: - name: daemonset-admin - rules: - - resources: - - daemonsets - apiGroups: - - extensions - verbs: - - create - - get - - list - - watch - - delete - - update - - apiVersion: v1 - kind: ClusterRole - metadata: - name: rolebinding-reader - rules: - - resources: - - clusterrolebindings - verbs: - - get - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-edit-role - roleRef: - name: edit - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-dsadmin-role - roleRef: - name: daemonset-admin - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-elasticsearch-view-role - roleRef: - name: view - subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-template - annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
- tags: "infrastructure" - labels: - logging-infra: deployer - provider: openshift - objects: - - - apiVersion: v1 - kind: Pod - metadata: - generateName: logging-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION} - imagePullPolicy: Always - name: deployer - volumeMounts: - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: IMAGE_PULL_SECRET - value: ${IMAGE_PULL_SECRET} - - name: INSECURE_REGISTRY - value: ${INSECURE_REGISTRY} - - name: ENABLE_OPS_CLUSTER - value: ${ENABLE_OPS_CLUSTER} - - name: KIBANA_HOSTNAME - value: ${KIBANA_HOSTNAME} - - name: KIBANA_OPS_HOSTNAME - value: ${KIBANA_OPS_HOSTNAME} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: ES_INSTANCE_RAM - value: ${ES_INSTANCE_RAM} - - name: ES_PVC_SIZE - value: ${ES_PVC_SIZE} - - name: ES_PVC_PREFIX - value: ${ES_PVC_PREFIX} - - name: ES_PVC_DYNAMIC - value: ${ES_PVC_DYNAMIC} - - name: ES_CLUSTER_SIZE - value: ${ES_CLUSTER_SIZE} - - name: ES_NODE_QUORUM - value: ${ES_NODE_QUORUM} - - name: ES_RECOVER_AFTER_NODES - value: ${ES_RECOVER_AFTER_NODES} - - name: ES_RECOVER_EXPECTED_NODES - value: ${ES_RECOVER_EXPECTED_NODES} - - name: ES_RECOVER_AFTER_TIME - value: ${ES_RECOVER_AFTER_TIME} - - name: ES_OPS_INSTANCE_RAM - value: ${ES_OPS_INSTANCE_RAM} - - name: ES_OPS_PVC_SIZE - value: ${ES_OPS_PVC_SIZE} - - name: ES_OPS_PVC_PREFIX - value: ${ES_OPS_PVC_PREFIX} - - name: ES_OPS_PVC_DYNAMIC - value: ${ES_OPS_PVC_DYNAMIC} - - name: ES_OPS_CLUSTER_SIZE - value: ${ES_OPS_CLUSTER_SIZE} - - name: ES_OPS_NODE_QUORUM - value: ${ES_OPS_NODE_QUORUM} - - name: ES_OPS_RECOVER_AFTER_NODES - value: ${ES_OPS_RECOVER_AFTER_NODES} - - name: ES_OPS_RECOVER_EXPECTED_NODES - value: ${ES_OPS_RECOVER_EXPECTED_NODES} - - name: ES_OPS_RECOVER_AFTER_TIME - value: ${ES_OPS_RECOVER_AFTER_TIME} - - name: FLUENTD_NODESELECTOR - value: ${FLUENTD_NODESELECTOR} - - name: ES_NODESELECTOR - value: ${ES_NODESELECTOR} - - name: ES_OPS_NODESELECTOR - value: ${ES_OPS_NODESELECTOR} - - name: KIBANA_NODESELECTOR - value: ${KIBANA_NODESELECTOR} - - name: KIBANA_OPS_NODESELECTOR - value: ${KIBANA_OPS_NODESELECTOR} - - name: CURATOR_NODESELECTOR - value: ${CURATOR_NODESELECTOR} - - name: CURATOR_OPS_NODESELECTOR - value: ${CURATOR_OPS_NODESELECTOR} - - name: MODE - value: ${MODE} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: logging-deployer - volumes: - - name: empty - emptyDir: {} - parameters: - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "docker.io/openshift/origin-" - - - description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' - name: IMAGE_VERSION - value: "latest" - - - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
- name: IMAGE_PULL_SECRET - - - description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" - name: INSECURE_REGISTRY - value: "false" - - - description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." - name: ENABLE_OPS_CLUSTER - value: "false" - - - description: "(Deprecated) External hostname where clients will reach kibana" - name: KIBANA_HOSTNAME - value: "kibana.example.com" - - - description: "(Deprecated) External hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com - - - description: "(Deprecated) External URL for the master, for OAuth purposes" - name: PUBLIC_MASTER_URL - value: "https://localhost:8443" - - - description: "(Deprecated) Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc.cluster.local" - - - description: "(Deprecated) How many instances of ElasticSearch to deploy." - name: ES_CLUSTER_SIZE - value: "1" - - - description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." - name: ES_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." - name: ES_PVC_PREFIX - value: "logging-es-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' - name: ES_PVC_DYNAMIC - - - description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_NODE_QUORUM - - - description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." - name: ES_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." - name: ES_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." - name: ES_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." - name: ES_OPS_CLUSTER_SIZE - - - description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." - name: ES_OPS_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_OPS_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." - name: ES_OPS_PVC_PREFIX - value: "logging-es-ops-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' - name: ES_OPS_PVC_DYNAMIC - - - description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_OPS_NODE_QUORUM - - - description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." - name: ES_OPS_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." - name: FLUENTD_NODESELECTOR - value: "logging-infra-fluentd=true" - - - description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." - name: ES_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." - name: ES_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana cluster (label=value)." - name: KIBANA_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana operations cluster (label=value)." - name: KIBANA_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Curator (label=value)." - name: CURATOR_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector operations Curator (label=value)." - name: CURATOR_OPS_NODESELECTOR - value: "" diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml deleted file mode 100644 index d191c0439..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
- tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - securityContext: {} - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - securityContext: {} - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: MASTER_URL - value: ${MASTER_URL} - - name: MODE - value: ${MODE} - - name: CONTINUE_ON_ERROR - value: ${CONTINUE_ON_ERROR} - - name: REDEPLOY - value: ${REDEPLOY} - - name: IGNORE_PREFLIGHT - value: ${IGNORE_PREFLIGHT} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: DYNAMICALLY_PROVISION_STORAGE - value: ${DYNAMICALLY_PROVISION_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - - name: USER_WRITE_ACCESS - value: ${USER_WRITE_ACCESS} - - name: HEAPSTER_NODE_ID - value: ${HEAPSTER_NODE_ID} - - name: METRIC_RESOLUTION - value: ${METRIC_RESOLUTION} - - name: STARTUP_TIMEOUT - value: ${STARTUP_TIMEOUT} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "openshift/origin-" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' - name: IMAGE_VERSION - value: "latest" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" - name: MODE - value: "deploy" -- - description: "Set to true to continue even if the deployer runs into an error." - name: CONTINUE_ON_ERROR - value: "false" -- - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" - name: REDEPLOY - value: "false" -- - description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." 
- name: IGNORE_PREFLIGHT - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" - name: DYNAMICALLY_PROVISION_STORAGE - value: "false" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "10Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" -- - description: "If a user accounts should be allowed to write metrics." - name: USER_WRITE_ACCESS - value: "false" -- - description: "The identifier used when generating metric ids in Hawkular" - name: HEAPSTER_NODE_ID - value: "nodename" -- - description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" - name: METRIC_RESOLUTION - value: "30s" -- - description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" - name: STARTUP_TIMEOUT - value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml deleted file mode 100644 index fdfc285ca..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml +++ /dev/null @@ -1,345 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-account-template - annotations: - description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
- tags: "infrastructure" - objects: - - - apiVersion: v1 - kind: ServiceAccount - name: logging-deployer - metadata: - name: logging-deployer - labels: - logging-infra: deployer - provider: openshift - component: deployer - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - - apiVersion: v1 - kind: ClusterRole - metadata: - name: oauth-editor - rules: - - resources: - - oauthclients - verbs: - - create - - delete - - apiVersion: v1 - kind: ClusterRole - metadata: - name: daemonset-admin - rules: - - resources: - - daemonsets - apiGroups: - - extensions - verbs: - - create - - get - - list - - watch - - delete - - update - - apiVersion: v1 - kind: ClusterRole - metadata: - name: rolebinding-reader - rules: - - resources: - - clusterrolebindings - verbs: - - get - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-edit-role - roleRef: - kind: ClusterRole - name: edit - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-dsadmin-role - roleRef: - kind: ClusterRole - name: daemonset-admin - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-elasticsearch-view-role - roleRef: - kind: ClusterRole - name: view - subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-template - annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
- tags: "infrastructure" - labels: - logging-infra: deployer - provider: openshift - objects: - - - apiVersion: v1 - kind: Pod - metadata: - generateName: logging-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION} - imagePullPolicy: Always - name: deployer - volumeMounts: - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: IMAGE_PULL_SECRET - value: ${IMAGE_PULL_SECRET} - - name: INSECURE_REGISTRY - value: ${INSECURE_REGISTRY} - - name: ENABLE_OPS_CLUSTER - value: ${ENABLE_OPS_CLUSTER} - - name: KIBANA_HOSTNAME - value: ${KIBANA_HOSTNAME} - - name: KIBANA_OPS_HOSTNAME - value: ${KIBANA_OPS_HOSTNAME} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: ES_INSTANCE_RAM - value: ${ES_INSTANCE_RAM} - - name: ES_PVC_SIZE - value: ${ES_PVC_SIZE} - - name: ES_PVC_PREFIX - value: ${ES_PVC_PREFIX} - - name: ES_PVC_DYNAMIC - value: ${ES_PVC_DYNAMIC} - - name: ES_CLUSTER_SIZE - value: ${ES_CLUSTER_SIZE} - - name: ES_NODE_QUORUM - value: ${ES_NODE_QUORUM} - - name: ES_RECOVER_AFTER_NODES - value: ${ES_RECOVER_AFTER_NODES} - - name: ES_RECOVER_EXPECTED_NODES - value: ${ES_RECOVER_EXPECTED_NODES} - - name: ES_RECOVER_AFTER_TIME - value: ${ES_RECOVER_AFTER_TIME} - - name: ES_OPS_INSTANCE_RAM - value: ${ES_OPS_INSTANCE_RAM} - - name: ES_OPS_PVC_SIZE - value: ${ES_OPS_PVC_SIZE} - - name: ES_OPS_PVC_PREFIX - value: ${ES_OPS_PVC_PREFIX} - - name: ES_OPS_PVC_DYNAMIC - value: ${ES_OPS_PVC_DYNAMIC} - - name: ES_OPS_CLUSTER_SIZE - value: ${ES_OPS_CLUSTER_SIZE} - - name: ES_OPS_NODE_QUORUM - value: ${ES_OPS_NODE_QUORUM} - - name: ES_OPS_RECOVER_AFTER_NODES - value: ${ES_OPS_RECOVER_AFTER_NODES} - - name: ES_OPS_RECOVER_EXPECTED_NODES - value: ${ES_OPS_RECOVER_EXPECTED_NODES} - - name: ES_OPS_RECOVER_AFTER_TIME - value: ${ES_OPS_RECOVER_AFTER_TIME} - - name: FLUENTD_NODESELECTOR - value: ${FLUENTD_NODESELECTOR} - - name: ES_NODESELECTOR - value: ${ES_NODESELECTOR} - - name: ES_OPS_NODESELECTOR - value: ${ES_OPS_NODESELECTOR} - - name: KIBANA_NODESELECTOR - value: ${KIBANA_NODESELECTOR} - - name: KIBANA_OPS_NODESELECTOR - value: ${KIBANA_OPS_NODESELECTOR} - - name: CURATOR_NODESELECTOR - value: ${CURATOR_NODESELECTOR} - - name: CURATOR_OPS_NODESELECTOR - value: ${CURATOR_OPS_NODESELECTOR} - - name: MODE - value: ${MODE} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: logging-deployer - volumes: - - name: empty - emptyDir: {} - parameters: - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' - name: IMAGE_PREFIX - value: "registry.access.redhat.com/openshift3/" - - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' - name: IMAGE_VERSION - value: "3.4.0" - - - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
- name: IMAGE_PULL_SECRET - - - description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" - name: INSECURE_REGISTRY - value: "false" - - - description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." - name: ENABLE_OPS_CLUSTER - value: "false" - - - description: "(Deprecated) External hostname where clients will reach kibana" - name: KIBANA_HOSTNAME - value: "kibana.example.com" - - - description: "(Deprecated) External hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com - - - description: "(Deprecated) External URL for the master, for OAuth purposes" - name: PUBLIC_MASTER_URL - value: "https://localhost:8443" - - - description: "(Deprecated) Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc.cluster.local" - - - description: "(Deprecated) How many instances of ElasticSearch to deploy." - name: ES_CLUSTER_SIZE - value: "1" - - - description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." - name: ES_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." - name: ES_PVC_PREFIX - value: "logging-es-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' - name: ES_PVC_DYNAMIC - - - description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_NODE_QUORUM - - - description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." - name: ES_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." - name: ES_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." - name: ES_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." - name: ES_OPS_CLUSTER_SIZE - - - description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." - name: ES_OPS_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_OPS_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." - name: ES_OPS_PVC_PREFIX - value: "logging-es-ops-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' - name: ES_OPS_PVC_DYNAMIC - - - description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_OPS_NODE_QUORUM - - - description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." - name: ES_OPS_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." - name: FLUENTD_NODESELECTOR - value: "logging-infra-fluentd=true" - - - description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." - name: ES_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." - name: ES_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana cluster (label=value)." - name: KIBANA_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana operations cluster (label=value)." - name: KIBANA_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Curator (label=value)." - name: CURATOR_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector operations Curator (label=value)." - name: CURATOR_OPS_NODESELECTOR - value: "" diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml deleted file mode 100644 index c4ab794ae..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
- tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - securityContext: {} - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - securityContext: {} - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: MASTER_URL - value: ${MASTER_URL} - - name: MODE - value: ${MODE} - - name: CONTINUE_ON_ERROR - value: ${CONTINUE_ON_ERROR} - - name: REDEPLOY - value: ${REDEPLOY} - - name: IGNORE_PREFLIGHT - value: ${IGNORE_PREFLIGHT} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: DYNAMICALLY_PROVISION_STORAGE - value: ${DYNAMICALLY_PROVISION_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - - name: USER_WRITE_ACCESS - value: ${USER_WRITE_ACCESS} - - name: HEAPSTER_NODE_ID - value: ${HEAPSTER_NODE_ID} - - name: METRIC_RESOLUTION - value: ${METRIC_RESOLUTION} - - name: STARTUP_TIMEOUT - value: ${STARTUP_TIMEOUT} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "registry.access.redhat.com/openshift3/" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' - name: IMAGE_VERSION - value: "v3.5" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" - name: MODE - value: "deploy" -- - description: "Set to true to continue even if the deployer runs into an error." - name: CONTINUE_ON_ERROR - value: "false" -- - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" - name: REDEPLOY - value: "false" -- - description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." 
- name: IGNORE_PREFLIGHT - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" - name: DYNAMICALLY_PROVISION_STORAGE - value: "false" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "10Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" -- - description: "If a user accounts should be allowed to write metrics." - name: USER_WRITE_ACCESS - value: "false" -- - description: "The identifier used when generating metric ids in Hawkular" - name: HEAPSTER_NODE_ID - value: "nodename" -- - description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" - name: METRIC_RESOLUTION - value: "30s" -- - description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" - name: STARTUP_TIMEOUT - value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml deleted file mode 100644 index 5b5503500..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml +++ /dev/null @@ -1,342 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-account-template - annotations: - description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
- tags: "infrastructure" - objects: - - - apiVersion: v1 - kind: ServiceAccount - name: logging-deployer - metadata: - name: logging-deployer - labels: - logging-infra: deployer - provider: openshift - component: deployer - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - - apiVersion: v1 - kind: ClusterRole - metadata: - name: oauth-editor - rules: - - resources: - - oauthclients - verbs: - - create - - delete - - apiVersion: v1 - kind: ClusterRole - metadata: - name: daemonset-admin - rules: - - resources: - - daemonsets - apiGroups: - - extensions - verbs: - - create - - get - - list - - watch - - delete - - update - - apiVersion: v1 - kind: ClusterRole - metadata: - name: rolebinding-reader - rules: - - resources: - - clusterrolebindings - verbs: - - get - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-edit-role - roleRef: - name: edit - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-dsadmin-role - roleRef: - name: daemonset-admin - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-elasticsearch-view-role - roleRef: - name: view - subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-template - annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
- tags: "infrastructure" - labels: - logging-infra: deployer - provider: openshift - objects: - - - apiVersion: v1 - kind: Pod - metadata: - generateName: logging-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION} - imagePullPolicy: Always - name: deployer - volumeMounts: - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: IMAGE_PULL_SECRET - value: ${IMAGE_PULL_SECRET} - - name: INSECURE_REGISTRY - value: ${INSECURE_REGISTRY} - - name: ENABLE_OPS_CLUSTER - value: ${ENABLE_OPS_CLUSTER} - - name: KIBANA_HOSTNAME - value: ${KIBANA_HOSTNAME} - - name: KIBANA_OPS_HOSTNAME - value: ${KIBANA_OPS_HOSTNAME} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: ES_INSTANCE_RAM - value: ${ES_INSTANCE_RAM} - - name: ES_PVC_SIZE - value: ${ES_PVC_SIZE} - - name: ES_PVC_PREFIX - value: ${ES_PVC_PREFIX} - - name: ES_PVC_DYNAMIC - value: ${ES_PVC_DYNAMIC} - - name: ES_CLUSTER_SIZE - value: ${ES_CLUSTER_SIZE} - - name: ES_NODE_QUORUM - value: ${ES_NODE_QUORUM} - - name: ES_RECOVER_AFTER_NODES - value: ${ES_RECOVER_AFTER_NODES} - - name: ES_RECOVER_EXPECTED_NODES - value: ${ES_RECOVER_EXPECTED_NODES} - - name: ES_RECOVER_AFTER_TIME - value: ${ES_RECOVER_AFTER_TIME} - - name: ES_OPS_INSTANCE_RAM - value: ${ES_OPS_INSTANCE_RAM} - - name: ES_OPS_PVC_SIZE - value: ${ES_OPS_PVC_SIZE} - - name: ES_OPS_PVC_PREFIX - value: ${ES_OPS_PVC_PREFIX} - - name: ES_OPS_PVC_DYNAMIC - value: ${ES_OPS_PVC_DYNAMIC} - - name: ES_OPS_CLUSTER_SIZE - value: ${ES_OPS_CLUSTER_SIZE} - - name: ES_OPS_NODE_QUORUM - value: ${ES_OPS_NODE_QUORUM} - - name: ES_OPS_RECOVER_AFTER_NODES - value: ${ES_OPS_RECOVER_AFTER_NODES} - - name: ES_OPS_RECOVER_EXPECTED_NODES - value: ${ES_OPS_RECOVER_EXPECTED_NODES} - - name: ES_OPS_RECOVER_AFTER_TIME - value: ${ES_OPS_RECOVER_AFTER_TIME} - - name: FLUENTD_NODESELECTOR - value: ${FLUENTD_NODESELECTOR} - - name: ES_NODESELECTOR - value: ${ES_NODESELECTOR} - - name: ES_OPS_NODESELECTOR - value: ${ES_OPS_NODESELECTOR} - - name: KIBANA_NODESELECTOR - value: ${KIBANA_NODESELECTOR} - - name: KIBANA_OPS_NODESELECTOR - value: ${KIBANA_OPS_NODESELECTOR} - - name: CURATOR_NODESELECTOR - value: ${CURATOR_NODESELECTOR} - - name: CURATOR_OPS_NODESELECTOR - value: ${CURATOR_OPS_NODESELECTOR} - - name: MODE - value: ${MODE} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: logging-deployer - volumes: - - name: empty - emptyDir: {} - parameters: - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "docker.io/openshift/origin-" - - - description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' - name: IMAGE_VERSION - value: "latest" - - - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
- name: IMAGE_PULL_SECRET - - - description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" - name: INSECURE_REGISTRY - value: "false" - - - description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." - name: ENABLE_OPS_CLUSTER - value: "false" - - - description: "(Deprecated) External hostname where clients will reach kibana" - name: KIBANA_HOSTNAME - value: "kibana.example.com" - - - description: "(Deprecated) External hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com - - - description: "(Deprecated) External URL for the master, for OAuth purposes" - name: PUBLIC_MASTER_URL - value: "https://localhost:8443" - - - description: "(Deprecated) Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc.cluster.local" - - - description: "(Deprecated) How many instances of ElasticSearch to deploy." - name: ES_CLUSTER_SIZE - value: "1" - - - description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." - name: ES_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." - name: ES_PVC_PREFIX - value: "logging-es-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' - name: ES_PVC_DYNAMIC - - - description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_NODE_QUORUM - - - description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." - name: ES_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." - name: ES_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." - name: ES_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." - name: ES_OPS_CLUSTER_SIZE - - - description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." - name: ES_OPS_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_OPS_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." - name: ES_OPS_PVC_PREFIX - value: "logging-es-ops-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' - name: ES_OPS_PVC_DYNAMIC - - - description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_OPS_NODE_QUORUM - - - description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." - name: ES_OPS_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." - name: FLUENTD_NODESELECTOR - value: "logging-infra-fluentd=true" - - - description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." - name: ES_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." - name: ES_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana cluster (label=value)." - name: KIBANA_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana operations cluster (label=value)." - name: KIBANA_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Curator (label=value)." - name: CURATOR_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector operations Curator (label=value)." - name: CURATOR_OPS_NODESELECTOR - value: "" diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml deleted file mode 100644 index d191c0439..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
- tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - securityContext: {} - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - securityContext: {} - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: MASTER_URL - value: ${MASTER_URL} - - name: MODE - value: ${MODE} - - name: CONTINUE_ON_ERROR - value: ${CONTINUE_ON_ERROR} - - name: REDEPLOY - value: ${REDEPLOY} - - name: IGNORE_PREFLIGHT - value: ${IGNORE_PREFLIGHT} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: DYNAMICALLY_PROVISION_STORAGE - value: ${DYNAMICALLY_PROVISION_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - - name: USER_WRITE_ACCESS - value: ${USER_WRITE_ACCESS} - - name: HEAPSTER_NODE_ID - value: ${HEAPSTER_NODE_ID} - - name: METRIC_RESOLUTION - value: ${METRIC_RESOLUTION} - - name: STARTUP_TIMEOUT - value: ${STARTUP_TIMEOUT} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "openshift/origin-" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' - name: IMAGE_VERSION - value: "latest" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" - name: MODE - value: "deploy" -- - description: "Set to true to continue even if the deployer runs into an error." - name: CONTINUE_ON_ERROR - value: "false" -- - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" - name: REDEPLOY - value: "false" -- - description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." 
- name: IGNORE_PREFLIGHT - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" - name: DYNAMICALLY_PROVISION_STORAGE - value: "false" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "10Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" -- - description: "If a user accounts should be allowed to write metrics." - name: USER_WRITE_ACCESS - value: "false" -- - description: "The identifier used when generating metric ids in Hawkular" - name: HEAPSTER_NODE_ID - value: "nodename" -- - description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" - name: METRIC_RESOLUTION - value: "30s" -- - description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" - name: STARTUP_TIMEOUT - value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index 28feac4e6..28feac4e6 100644 --- a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml index 80cc4233b..80cc4233b 100644 --- a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 42f4fc72e..cba0f2de8 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -91,8 +91,6 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta - `openshift_logging_es_ops_pvc_prefix`: logging-es-ops - `openshift_logging_es_ops_recover_after_time`: 5m - `openshift_logging_es_ops_storage_group`: 65534 -- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. -- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'. - `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'. - `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. - `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. 
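The README hunk above drops the ops-specific shard and replica variables and keeps the remaining ops-cluster settings. As rough orientation only, a hypothetical inventory snippet setting a few of the variables documented there might look like the following; the hostname and resource limits are placeholder values, not defaults shipped by this patch:

```
# hypothetical [OSEv3:vars] values -- hostname and limits are placeholders
[OSEv3:vars]
openshift_logging_install_logging=true
openshift_logging_use_ops=true
openshift_logging_kibana_ops_hostname=kibana-ops.apps.example.com
openshift_logging_kibana_ops_cpu_limit=200m
openshift_logging_kibana_ops_memory_limit=1Gi
openshift_logging_es_ops_pvc_prefix=logging-es-ops
```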
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 96ed44011..76dfe518e 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -3,6 +3,10 @@ openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | def
 openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
 openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
 openshift_logging_namespace: logging
+openshift_logging_nodeselector: null
+openshift_logging_labels: {}
+openshift_logging_label_key: ""
+openshift_logging_label_value: ""
 openshift_logging_install_logging: True
 openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
 
@@ -68,7 +72,7 @@ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nod
 openshift_logging_fluentd_cpu_limit: 100m
 openshift_logging_fluentd_memory_limit: 512Mi
 openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal if openshift_hosted_logging_use_journal is defined else (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options is defined else default(omit) }}"
 openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
 openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
 openshift_logging_fluentd_hosts: ['--all']
@@ -113,12 +117,19 @@ openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_
 openshift_logging_es_ops_recover_after_time: 5m
 openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
 openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_es_ops_number_of_shards: 1
-openshift_logging_es_ops_number_of_replicas: 0
 
 # storage related defaults
 openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
 
+# mux - secure_forward listener service
+openshift_logging_mux_allow_external: False
+openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
+# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch
+openshift_logging_use_mux_client: False
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+openshift_logging_mux_cpu_limit: 100m
+openshift_logging_mux_memory_limit: 512Mi
 
 # following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
 #es_logging_contents:
@@ -127,3 +138,5 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc
 #fluentd_config_contents:
 #fluentd_throttle_contents:
 #fluentd_secureforward_contents:
+#fluentd_mux_config_contents:
+#fluentd_mux_secureforward_contents:
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index ffb812271..69c5a1663 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -4,6 +4,15 @@
   when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
   notify: Verify API Server
 
+- name: restart master api
+  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  notify: Verify API Server
+
+- name: restart master controllers
+  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
   # wait_for port doesn't provide health information.
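The defaults hunk above wires in the new mux (secure_forward aggregation) service that the task changes below deploy. A minimal, hypothetical inventory sketch for enabling it might look like this; the hostname is a placeholder, and `openshift_logging_mux_allow_external` is only needed when off-cluster Fluentd instances should be able to forward into the cluster:

```
# hypothetical values -- only the variable names come from this patch
[OSEv3:vars]
openshift_logging_use_mux=True
openshift_logging_use_mux_client=True
openshift_logging_mux_hostname=mux.apps.example.com
openshift_logging_mux_port=24284
openshift_logging_mux_allow_external=True
```

With `openshift_logging_mux_allow_external` set, the generate_services change further down renders the logging-mux service with an external IP; otherwise only the in-cluster mux service is created.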
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 188ea246c..2f5b68b4d 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -44,6 +44,7 @@ - logging-kibana - logging-kibana-proxy - logging-curator + - logging-mux ignore_errors: yes register: delete_result changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 @@ -109,5 +110,6 @@ - logging-curator - logging-elasticsearch - logging-fluentd + - logging-mux register: delete_result changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 740e490e1..b34df018d 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -45,6 +45,21 @@ - procure_component: kibana-internal hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}" +- include: procure_server_certs.yaml + loop_control: + loop_var: cert_info + with_items: + - procure_component: mux + hostnames: "logging-mux, {{openshift_logging_mux_hostname}}" + when: openshift_logging_use_mux + +- include: procure_shared_key.yaml + loop_control: + loop_var: shared_key_info + with_items: + - procure_component: mux + when: openshift_logging_use_mux + - name: Copy proxy TLS configuration file copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json when: server_tls_json is undefined @@ -85,6 +100,14 @@ loop_control: loop_var: node_name +- name: Generate PEM cert for mux + include: generate_pems.yaml component={{node_name}} + with_items: + - system.logging.mux + loop_control: + loop_var: node_name + when: openshift_logging_use_mux + - name: Creating necessary JKS certs include: generate_jks.yaml diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml index 253543f54..b047eb35a 100644 --- a/roles/openshift_logging/tasks/generate_configmaps.yaml +++ b/roles/openshift_logging/tasks/generate_configmaps.yaml @@ -21,6 +21,8 @@ dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml" vars: - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}" + - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" + - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}" when: es_config_contents is undefined changed_when: no @@ -134,3 +136,43 @@ when: fluentd_configmap.stdout is defined changed_when: no check_mode: no + +- block: + - copy: + src: fluent.conf + dest: "{{mktemp.stdout}}/fluent-mux.conf" + when: fluentd_mux_config_contents is undefined + changed_when: no + + - copy: + src: secure-forward.conf + dest: "{{mktemp.stdout}}/secure-forward-mux.conf" + when: fluentd_mux_securefoward_contents is undefined + changed_when: no + + - copy: + content: "{{fluentd_mux_config_contents}}" + dest: "{{mktemp.stdout}}/fluent-mux.conf" + when: fluentd_mux_config_contents is defined + changed_when: no + + - copy: + content: "{{fluentd_mux_secureforward_contents}}" + dest: "{{mktemp.stdout}}/secure-forward-mux.conf" + when: fluentd_mux_secureforward_contents is defined + changed_when: no + + - command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux + 
--from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf + --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run + register: mux_configmap + changed_when: no + + - copy: + content: "{{mux_configmap.stdout}}" + dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml" + when: mux_configmap.stdout is defined + changed_when: no + check_mode: no + when: openshift_logging_use_mux diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml index e77da7a24..f76bb3a0a 100644 --- a/roles/openshift_logging/tasks/generate_routes.yaml +++ b/roles/openshift_logging/tasks/generate_routes.yaml @@ -1,14 +1,14 @@ --- - set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }} - when: "{{ openshift_logging_kibana_key | trim | length > 0 }}" + when: openshift_logging_kibana_key | trim | length > 0 changed_when: false - set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }} - when: "{{openshift_logging_kibana_cert | trim | length > 0}}" + when: openshift_logging_kibana_cert | trim | length > 0 changed_when: false - set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }} - when: "{{openshift_logging_kibana_ca | trim | length > 0}}" + when: openshift_logging_kibana_ca | trim | length > 0 changed_when: false - set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }} diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml index f396bcc6d..c1da49fd8 100644 --- a/roles/openshift_logging/tasks/generate_secrets.yaml +++ b/roles/openshift_logging/tasks/generate_secrets.yaml @@ -34,6 +34,36 @@ check_mode: no changed_when: no +- name: Retrieving the cert to use when generating secrets for mux + slurp: src="{{generated_certs_dir}}/{{item.file}}" + register: mux_key_pairs + with_items: + - { name: "ca_file", file: "ca.crt" } + - { name: "mux_key", file: "system.logging.mux.key"} + - { name: "mux_cert", file: "system.logging.mux.crt"} + - { name: "mux_shared_key", file: "mux_shared_key"} + when: openshift_logging_use_mux + +- name: Generating secrets for mux + template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml + vars: + secret_name: "logging-{{component}}" + secret_key_file: "{{component}}_key" + secret_cert_file: "{{component}}_cert" + secrets: + - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} + - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} + - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} + - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"} + secret_keys: ["ca", "cert", "key", "shared_key"] + with_items: + - mux + loop_control: + loop_var: component + check_mode: no + changed_when: no + when: openshift_logging_use_mux + - name: Generating secrets for kibana proxy template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml vars: @@ -43,7 +73,7 @@ - {key: session-secret, value: "{{session_secret}}"} - {key: server-key, value: "{{kibana_key_file}}"} - {key: server-cert, value: "{{kibana_cert_file}}"} - - {key: server-tls, value: "{{server_tls_file}}"} + - {key: server-tls.json, value: "{{server_tls_file}}"} secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"] kibana_key_file: "{{key_pairs 
| entry_from_named_pair('kibana_internal_key')| b64decode }}" kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}" diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml index 5091c1209..e3a5c5eb3 100644 --- a/roles/openshift_logging/tasks/generate_services.yaml +++ b/roles/openshift_logging/tasks/generate_services.yaml @@ -85,3 +85,35 @@ when: openshift_logging_use_ops | bool check_mode: no changed_when: no + +- name: Generating logging-mux service for external connections + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml + vars: + obj_name: logging-mux + ports: + - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward} + labels: + logging-infra: support + selector: + provider: openshift + component: mux + externalIPs: + - "{{ ansible_eth0.ipv4.address }}" + check_mode: no + changed_when: no + when: openshift_logging_mux_allow_external + +- name: Generating logging-mux service for intra-cluster connections + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml + vars: + obj_name: logging-mux + ports: + - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward} + labels: + logging-infra: support + selector: + provider: openshift + component: mux + check_mode: no + changed_when: no + when: openshift_logging_use_mux and not openshift_logging_mux_allow_external diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index 28fad420b..a981e7f7f 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -3,62 +3,51 @@ set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }} - set_fact: openshift_logging_es_pvc_prefix="logging-es" - when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''" + when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == '' -- set_fact: es_pvc_pool={{[]}} +- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }} + with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} -- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}" - -- name: Generate PersistentVolumeClaims - include: "{{ role_path}}/tasks/generate_pvcs.yaml" +### evaluate if the PVC attached to the dc currently matches the provided vars +## if it does then we reuse that pvc in the DC +- include: set_es_storage.yaml vars: - es_pv_selector: "{{openshift_logging_es_pv_selector}}" - es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}" - es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}" - es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}" - es_pvc_size: "{{openshift_logging_es_pvc_size}}" - es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}" - es_cluster_size: "{{openshift_logging_es_cluster_size}}" - es_access_modes: "{{ openshift_logging_storage_access_modes }}" - -# we should initialize the es_dc_pool with the current keys -- name: Init pool of DeploymentConfig names for Elasticsearch - set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }} - with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}" + es_component: es + es_name: 
"{{ deployment.0 }}" + es_spec: "{{ deployment.1 }}" + es_pvc_count: "{{ deployment.2 | int }}" + es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}" + es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() | count }}" + es_pvc_size: "{{ openshift_logging_es_pvc_size }}" + es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}" + es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}" + es_pv_selector: "{{ openshift_logging_es_pv_selector }}" + es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}" + es_memory_limit: "{{ openshift_logging_es_memory_limit }}" + with_together: + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" + - "{{ es_indices | default([]) }}" loop_control: - loop_var: deploy_name - -# This should be used to generate new DC names if necessary -- name: Create new DeploymentConfig names for Elasticsearch - set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}} - vars: - component: es - es_cluster_name: "{{component}}" - deploy_name_prefix: "logging-{{component}}" - deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" - with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }} - check_mode: no + loop_var: deployment +## if it does not then we should create one that does and attach it -- name: Generate Elasticsearch DeploymentConfig - template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml +## create new dc/pvc is needed +- include: set_es_storage.yaml vars: - component: es - logging_component: elasticsearch - deploy_name_prefix: "logging-{{component}}" - image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" - es_cluster_name: "{{component}}" - es_cpu_limit: "{{openshift_logging_es_cpu_limit }}" - es_memory_limit: "{{openshift_logging_es_memory_limit}}" - pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}" - deploy_name: "{{item.1}}" - es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}" - es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}" - es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}" - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}" - with_indexed_items: - - "{{ es_dc_pool }}" - check_mode: no - changed_when: no + es_component: es + es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" + es_spec: "{}" + es_pvc_count: "{{ item | int - 1 }}" + es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}" + es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch.pvcs.keys() | count, openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count] | max }}" + es_pvc_size: "{{ openshift_logging_es_pvc_size }}" + es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}" + es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}" + es_pv_selector: "{{ openshift_logging_es_pv_selector }}" + es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}" + es_memory_limit: "{{ openshift_logging_es_memory_limit }}" + with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }} # --------- Tasks for Operation clusters --------- @@ -73,74 +62,57 @@ es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}" 
cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}" when: - - openshift_logging_use_ops | bool - - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}" + - openshift_logging_use_ops | bool + - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}" check_mode: no - set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops" - when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''" - -- set_fact: es_pvc_pool={{[]}} - -- name: Generate PersistentVolumeClaims for Ops - include: "{{ role_path}}/tasks/generate_pvcs.yaml" - vars: - es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}" - es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}" - es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}" - es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}" - es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}" - es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}" - es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}" - es_access_modes: "{{ openshift_logging_storage_access_modes }}" - when: - - openshift_logging_use_ops | bool - check_mode: no + when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == '' -- name: Init pool of DeploymentConfig names for Elasticsearch Ops - set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }} - with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}" - loop_control: - loop_var: deploy_name +- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }} + with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} when: - - openshift_logging_use_ops | bool + - openshift_logging_use_ops | bool -- name: Create new DeploymentConfig names for Elasticsearch Ops - set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}} +- include: set_es_storage.yaml vars: - component: es-ops - es_cluster_name: "{{component}}" - deploy_name_prefix: "logging-{{component}}" - deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" - cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}" - with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }} + es_component: es-ops + es_name: "{{ deployment.0 }}" + es_spec: "{{ deployment.1 }}" + es_pvc_count: "{{ deployment.2 | int }}" + es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}" + es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count }}" + es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" + es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}" + es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}" + es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" + es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" + es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" + with_together: + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}" + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" + - "{{ es_ops_indices | default([]) }}" + loop_control: + loop_var: deployment when: - - openshift_logging_use_ops | bool - check_mode: no + - openshift_logging_use_ops | bool +## if it does not then we should create one that does and attach it -- name: Generate Elasticsearch 
DeploymentConfig for Ops - template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml +## create new dc/pvc is needed +- include: set_es_storage.yaml vars: - component: es-ops - logging_component: elasticsearch - deploy_name_prefix: "logging-{{component}}" - image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" - pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}" - deploy_name: "{{item.1}}" - es_cluster_name: "{{component}}" - es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}" - es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}" - es_node_quorum: "{{es_ops_node_quorum}}" - es_recover_after_nodes: "{{es_ops_recover_after_nodes}}" - es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}" - openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}" - es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}" - es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}" - es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}" - es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}" - with_indexed_items: - - "{{ es_ops_dc_pool | default([]) }}" + es_component: es-ops + es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" + es_spec: "{}" + es_pvc_count: "{{ item | int - 1 }}" + es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}" + es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count, openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count] | max }}" + es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" + es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}" + es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}" + es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" + es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" + es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" + with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }} when: - - openshift_logging_use_ops | bool - check_mode: no - changed_when: no + - openshift_logging_use_ops | bool diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml index 35273829c..6bc405819 100644 --- a/roles/openshift_logging/tasks/install_fluentd.yaml +++ b/roles/openshift_logging/tasks/install_fluentd.yaml @@ -32,7 +32,7 @@ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd register: fluentd_output - failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" + failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr check_mode: no when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 @@ -49,6 +49,6 @@ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd register: fluentd2_output - failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" + failed_when: fluentd2_output.rc == 1 
and 'exists' not in fluentd2_output.stderr check_mode: no when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 83b68fa77..aec455c22 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -27,6 +27,10 @@ loop_control: loop_var: install_component +- name: Install logging mux + include: "{{ role_path }}/tasks/install_mux.yaml" + when: openshift_logging_use_mux + - find: paths={{ mktemp.stdout }}/templates patterns=*.yaml register: object_def_files changed_when: no diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml new file mode 100644 index 000000000..91eeb95a1 --- /dev/null +++ b/roles/openshift_logging/tasks/install_mux.yaml @@ -0,0 +1,67 @@ +--- +- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }} + check_mode: no + +- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }} + check_mode: no + +- name: Check mux current replica count + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux + -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} + register: mux_replica_count + when: not ansible_check_mode + ignore_errors: yes + changed_when: no + +- name: Generating mux deploymentconfig + template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml + vars: + component: mux + logging_component: mux + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}" + es_host: logging-es + es_port: "{{openshift_logging_es_port}}" + ops_host: "{{ mux_ops_host }}" + ops_port: "{{ mux_ops_port }}" + mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}" + mux_memory_limit: "{{openshift_logging_mux_memory_limit}}" + replicas: "{{mux_replica_count.stdout | default (0)}}" + mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}" + check_mode: no + changed_when: no + +- name: "Check mux hostmount-anyuid permissions" + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + get scc/hostmount-anyuid -o jsonpath='{.users}' + register: mux_hostmount_anyuid + check_mode: no + changed_when: no + +- name: "Set hostmount-anyuid permissions for mux" + command: > + {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy + add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd + register: mux_output + failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr + check_mode: no + when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 + +- name: "Check mux cluster-reader permissions" + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}' + register: mux_cluster_reader + check_mode: no + changed_when: no + +- name: "Set cluster-reader permissions for mux" + command: > + {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig 
policy + add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd + register: mux2_output + failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr + check_mode: no + when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml index da0bbb627..877ce3149 100644 --- a/roles/openshift_logging/tasks/install_support.yaml +++ b/roles/openshift_logging/tasks/install_support.yaml @@ -1,17 +1,36 @@ --- # This is the base configuration for installing the other components -- name: Check for logging project already exists - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers - register: logging_project_result - ignore_errors: yes - when: not ansible_check_mode - changed_when: no +- name: Set logging project + oc_project: + state: present + name: "{{ openshift_logging_namespace }}" + node_selector: "{{ openshift_logging_nodeselector | default(null) }}" + +- name: Labelling logging project + oc_label: + state: present + kind: namespace + name: "{{ openshift_logging_namespace }}" + labels: + - key: "{{ item.key }}" + value: "{{ item.value }}" + with_dict: "{{ openshift_logging_labels | default({}) }}" + when: + - openshift_logging_labels is defined + - openshift_logging_labels is dict -- name: "Create logging project" - command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}} - when: not ansible_check_mode and "not found" in logging_project_result.stderr +- name: Labelling logging project + oc_label: + state: present + kind: namespace + name: "{{ openshift_logging_namespace }}" + labels: + - key: "{{ openshift_logging_label_key }}" + value: "{{ openshift_logging_label_value }}" + when: + - openshift_logging_label_key is defined + - openshift_logging_label_key != "" + - openshift_logging_label_value is defined - name: Create logging cert directory file: path={{openshift.common.config_base}}/logging state=directory mode=0755 diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index c7f4a2f93..387da618d 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -1,7 +1,7 @@ --- - fail: msg: Only one Fluentd nodeselector key pair should be provided - when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1" + when: openshift_logging_fluentd_nodeselector.keys() | count > 1 - name: Set default image variables based on deployment_type include_vars: "{{ item }}" diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml index cb9509de1..a0ed56ebd 100644 --- a/roles/openshift_logging/tasks/oc_apply.yaml +++ b/roles/openshift_logging/tasks/oc_apply.yaml @@ -1,52 +1,52 @@ --- -- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} - command: > - {{ openshift.common.client_binary }} - --config={{ kubeconfig }} - get {{file_content.kind}} {{file_content.metadata.name}} - -o jsonpath='{.metadata.resourceVersion}' - -n {{namespace}} - register: generation_init - failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''" - changed_when: no +- oc_obj: + kind: "{{ file_content.kind 
}}" + name: "{{ file_content.metadata.name }}" + state: present + namespace: "{{ namespace }}" + files: + - "{{ file_name }}" + when: file_content.kind not in ["Service", "Route"] -- name: Applying {{file_name}} - command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} - apply -f {{ file_name }} - -n {{ namespace }} - register: generation_apply - failed_when: "'error' in generation_apply.stderr" - changed_when: no +## still need to do this for services until the template logic is replaced by oc_* +- block: + - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} + command: > + {{ openshift.common.client_binary }} + --config={{ kubeconfig }} + get {{file_content.kind}} {{file_content.metadata.name}} + -o jsonpath='{.metadata.resourceVersion}' + -n {{namespace}} + register: generation_init + failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''" + changed_when: no -- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} - command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} - get {{file_content.kind}} {{file_content.metadata.name}} - -o jsonpath='{.metadata.resourceVersion}' - -n {{namespace}} - register: generation_changed - failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''" - changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int - when: - - "'field is immutable' not in generation_apply.stderr" + - name: Applying {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + apply -f {{ file_name }} + -n {{ namespace }} + register: generation_apply + failed_when: "'error' in generation_apply.stderr" + changed_when: no -- name: Removing previous {{file_name}} - command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} - delete -f {{ file_name }} - -n {{ namespace }} - register: generation_delete - failed_when: "'error' in generation_delete.stderr" - changed_when: generation_delete.rc == 0 - when: "'field is immutable' in generation_apply.stderr" + - name: Removing previous {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + delete -f {{ file_name }} + -n {{ namespace }} + register: generation_delete + failed_when: "'error' in generation_delete.stderr" + changed_when: generation_delete.rc == 0 + when: "'field is immutable' in generation_apply.stderr" -- name: Recreating {{file_name}} - command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} - apply -f {{ file_name }} - -n {{ namespace }} - register: generation_apply - failed_when: "'error' in generation_apply.stderr" - changed_when: generation_apply.rc == 0 - when: "'field is immutable' in generation_apply.stderr" + - name: Recreating {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + apply -f {{ file_name }} + -n {{ namespace }} + register: generation_apply + failed_when: "'error' in generation_apply.stderr" + changed_when: generation_apply.rc == 0 + when: "'field is immutable' in generation_apply.stderr" + when: file_content.kind in ["Service", "Route"] diff --git a/roles/openshift_logging/tasks/procure_shared_key.yaml b/roles/openshift_logging/tasks/procure_shared_key.yaml new file mode 100644 index 000000000..056ff6b98 --- /dev/null +++ b/roles/openshift_logging/tasks/procure_shared_key.yaml @@ -0,0 +1,25 @@ +--- +- name: Checking for {{ 
shared_key_info.procure_component }}_shared_key + stat: path="{{generated_certs_dir}}/{{ shared_key_info.procure_component }}_shared_key" + register: component_shared_key_file + check_mode: no + +- name: Trying to discover shared key variable name for {{ shared_key_info.procure_component }} + set_fact: procure_component_shared_key={{ lookup('env', '{{shared_key_info.procure_component}}' + '_shared_key') }} + when: + - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined + check_mode: no + +- name: Creating shared_key for {{ shared_key_info.procure_component }} + copy: content="{{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}" + dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key" + check_mode: no + when: + - not component_shared_key_file.stat.exists + +- name: Copying shared key for {{ shared_key_info.procure_component }} to generated certs directory + copy: content="{{procure_component_shared_key}}" dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key" + check_mode: no + when: + - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined + - not component_shared_key_file.stat.exists diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml new file mode 100644 index 000000000..4afe4e641 --- /dev/null +++ b/roles/openshift_logging/tasks/set_es_storage.yaml @@ -0,0 +1,80 @@ +--- +- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}" + when: es_spec.volumes is defined + +- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}" + when: + - es_spec.volumes is defined + - es_storage_type.persistentVolumeClaim is defined + +- set_fact: es_storage_claim="" + when: + - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined + +## take an ES dc and evaluate its storage option +# if it is a hostmount or emptydir we don't do anything with it +# if its a pvc we see if the corresponding pvc matches the provided specs (if they exist) +- oc_obj: + state: list + kind: pvc + name: "{{ es_storage_claim }}" + namespace: "{{ openshift_logging_namespace }}" + register: pvc_spec + failed_when: pvc_spec.results.stderr is defined + when: + - es_spec.volumes is defined + - es_storage_type.persistentVolumeClaim is defined + +- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}" + when: + - pvc_spec.results is defined + - pvc_spec.results.results[0].spec is defined + +# if not create the pvc and use it +- block: + + - name: Generating PersistentVolumeClaims + template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml + vars: + obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" + size: "{{ es_pvc_size }}" + access_modes: "{{ openshift_logging_storage_access_modes }}" + pv_selector: "{{ es_pv_selector }}" + when: not es_pvc_dynamic | bool + check_mode: no + changed_when: no + + - name: Generating PersistentVolumeClaims - Dynamic + template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml + vars: + obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" + annotations: + volume.alpha.kubernetes.io/storage-class: "dynamic" + size: "{{ es_pvc_size }}" + access_modes: "{{ openshift_logging_storage_access_modes }}" + pv_selector: "{{ es_pv_selector }}" + when: es_pvc_dynamic | bool + 
check_mode: no + changed_when: no + + - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" + + when: + - es_pvc_size | search('^\d.*') + - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) ) + +- name: Generate Elasticsearch DeploymentConfig + template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml + vars: + component: "{{ es_component }}" + deploy_name: "{{ es_name }}" + logging_component: elasticsearch + deploy_name_prefix: "logging-{{ es_component }}" + image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" + es_cluster_name: "{{component}}" + es_cpu_limit: "{{ es_cpu_limit }}" + es_memory_limit: "{{ es_memory_limit }}" + es_node_selector: "{{ es_node_selector }}" + es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}" + check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml index edbb62c3e..c1592b830 100644 --- a/roles/openshift_logging/tasks/start_cluster.yaml +++ b/roles/openshift_logging/tasks/start_cluster.yaml @@ -21,6 +21,29 @@ loop_control: loop_var: fluentd_host +- name: Retrieve mux + oc_obj: + state: list + kind: dc + selector: "component=mux" + namespace: "{{openshift_logging_namespace}}" + register: mux_dc + when: openshift_logging_use_mux + +- name: start mux + oc_scale: + kind: dc + name: "{{ object }}" + namespace: "{{openshift_logging_namespace}}" + replicas: "{{ openshift_logging_mux_replica_count | default (1) }}" + with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}" + loop_control: + loop_var: object + when: + - mux_dc.results is defined + - mux_dc.results.results is defined + - openshift_logging_use_mux + - name: Retrieve elasticsearch oc_obj: state: list diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml index 4b3722e29..f4b419d84 100644 --- a/roles/openshift_logging/tasks/stop_cluster.yaml +++ b/roles/openshift_logging/tasks/stop_cluster.yaml @@ -21,6 +21,26 @@ loop_control: loop_var: fluentd_host +- name: Retrieve mux + oc_obj: + state: list + kind: dc + selector: "component=mux" + namespace: "{{openshift_logging_namespace}}" + register: mux_dc + when: openshift_logging_use_mux + +- name: stop mux + oc_scale: + kind: dc + name: "{{ object }}" + namespace: "{{openshift_logging_namespace}}" + replicas: 0 + with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}" + loop_control: + loop_var: object + when: openshift_logging_use_mux + - name: Retrieve elasticsearch oc_obj: state: list diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml index cef835668..10f522b61 100644 --- a/roles/openshift_logging/tasks/update_master_config.yaml +++ b/roles/openshift_logging/tasks/update_master_config.yaml @@ -4,6 +4,9 @@ dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: assetConfig.loggingPublicURL yaml_value: "https://{{ openshift_logging_kibana_hostname }}" - notify: restart master + notify: + - restart master + - restart master api + - restart master controllers tags: - - update_master_config + - update_master_config diff 
--git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2 index a0fefd882..c6284166b 100644 --- a/roles/openshift_logging/templates/curator.j2 +++ b/roles/openshift_logging/templates/curator.j2 @@ -89,9 +89,6 @@ spec: - name: config mountPath: /etc/curator/settings readOnly: true - - name: elasticsearch-storage - mountPath: /elasticsearch/persistent - readOnly: true volumes: - name: certs secret: @@ -99,5 +96,3 @@ spec: - name: config configMap: name: logging-curator - - name: elasticsearch-storage - emptyDir: {} diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2 index 0bf1686ad..5c93d823e 100644 --- a/roles/openshift_logging/templates/fluentd.j2 +++ b/roles/openshift_logging/templates/fluentd.j2 @@ -59,6 +59,14 @@ spec: - name: dockercfg mountPath: /etc/sysconfig/docker readOnly: true + - name: dockerdaemoncfg + mountPath: /etc/docker + readOnly: true +{% if openshift_logging_use_mux_client | bool %} + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true +{% endif %} env: - name: "K8S_HOST_URL" value: "{{openshift_logging_master_url}}" @@ -122,6 +130,8 @@ spec: value: "{{openshift_logging_fluentd_journal_source | default('')}}" - name: "JOURNAL_READ_FROM_HEAD" value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" + - name: "USE_MUX_CLIENT" + value: "{{openshift_logging_use_mux_client| default('false')}}" volumes: - name: runlogjournal hostPath: @@ -147,3 +157,11 @@ spec: - name: dockercfg hostPath: path: /etc/sysconfig/docker + - name: dockerdaemoncfg + hostPath: + path: /etc/docker +{% if openshift_logging_use_mux_client | bool %} + - name: muxcerts + secret: + secretName: logging-mux +{% endif %} diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging/templates/mux.j2 new file mode 100644 index 000000000..41e6abd52 --- /dev/null +++ b/roles/openshift_logging/templates/mux.j2 @@ -0,0 +1,121 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{replicas|default(0)}} + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + serviceAccountName: aggregated-logging-fluentd +{% if mux_node_selector is iterable and mux_node_selector | length > 0 %} + nodeSelector: +{% for key, value in mux_node_selector.iteritems() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: "mux" + image: {{image}} + imagePullPolicy: Always +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %} + resources: + limits: +{% if mux_cpu_limit is not none %} + cpu: "{{mux_cpu_limit}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} + ports: + - containerPort: "{{ openshift_logging_mux_port }}" + name: mux-forward + volumeMounts: + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: 
localtime + mountPath: /etc/localtime + readOnly: true + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true + env: + - name: "K8S_HOST_URL" + value: "{{openshift_logging_master_url}}" + - name: "ES_HOST" + value: "{{openshift_logging_es_host}}" + - name: "ES_PORT" + value: "{{openshift_logging_es_port}}" + - name: "ES_CLIENT_CERT" + value: "{{openshift_logging_es_client_cert}}" + - name: "ES_CLIENT_KEY" + value: "{{openshift_logging_es_client_key}}" + - name: "ES_CA" + value: "{{openshift_logging_es_ca}}" + - name: "OPS_HOST" + value: "{{ops_host}}" + - name: "OPS_PORT" + value: "{{ops_port}}" + - name: "OPS_CLIENT_CERT" + value: "{{openshift_logging_es_ops_client_cert}}" + - name: "OPS_CLIENT_KEY" + value: "{{openshift_logging_es_ops_client_key}}" + - name: "OPS_CA" + value: "{{openshift_logging_es_ops_ca}}" + - name: "USE_JOURNAL" + value: "false" + - name: "JOURNAL_SOURCE" + value: "{{openshift_logging_fluentd_journal_source | default('')}}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" + - name: FORWARD_LISTEN_HOST + value: "{{ openshift_logging_mux_hostname }}" + - name: FORWARD_LISTEN_PORT + value: "{{ openshift_logging_mux_port }}" + - name: USE_MUX + value: "true" + - name: MUX_ALLOW_EXTERNAL + value: "{{ openshift_logging_mux_allow_external| default('false') }}" + volumes: + - name: config + configMap: + name: logging-mux + - name: certs + secret: + secretName: logging-fluentd + - name: dockerhostname + hostPath: + path: /etc/hostname + - name: localtime + hostPath: + path: /etc/localtime + - name: muxcerts + secret: + secretName: logging-mux diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2 index 6c4ec0c76..70644a39c 100644 --- a/roles/openshift_logging/templates/service.j2 +++ b/roles/openshift_logging/templates/service.j2 @@ -26,3 +26,9 @@ spec: {% for key, value in selector.iteritems() %} {{key}}: {{value}} {% endfor %} +{% if externalIPs is defined -%} + externalIPs: +{% for ip in externalIPs %} + - {{ ip }} +{% endfor %} +{% endif %} diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index f202486a5..cfc4e2722 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -3,24 +3,13 @@ msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1." 
when: not openshift.common.version_gte_3_1_or_1_1 | bool -- name: Copy Configuration to temporary conf - command: > - cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}} - changed_when: false - - name: Add Management Infrastructure project - command: > - {{ openshift.common.client_binary }} adm new-project - management-infra - --description="Management Infrastructure" - --config={{manage_iq_tmp_conf}} - register: osmiq_create_mi_project - failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0" - changed_when: osmiq_create_mi_project.rc == 0 + oc_project: + name: management-infra + description: Management Infrastructure - name: Create Admin and Image Inspector Service Account oc_serviceaccount: - kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig" name: "{{ item }}" namespace: management-infra state: present @@ -28,51 +17,42 @@ - management-admin - inspector-admin -- name: Create Cluster Role - shell: > - echo {{ manageiq_cluster_role | to_json | quote }} | - {{ openshift.common.client_binary }} create - --config={{manage_iq_tmp_conf}} - -f - - register: osmiq_create_cluster_role - failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0" - changed_when: osmiq_create_cluster_role.rc == 0 +- name: Create manageiq cluster role + oc_clusterrole: + name: management-infra-admin + rules: + - apiGroups: + - "" + resources: + - pods/proxy + verbs: + - "*" - name: Create Hawkular Metrics Admin Cluster Role - shell: > - echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} | - {{ openshift.common.client_binary }} - --config={{manage_iq_tmp_conf}} - create -f - - register: oshawkular_create_cluster_role - failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0" - changed_when: oshawkular_create_cluster_role.rc == 0 - # AUDIT:changed_when_note: Checking the return code is insufficient - # here. We really need to verify the if the role even exists before - # we run this task. + oc_clusterrole: + name: hawkular-metrics-admin + rules: + - apiGroups: + - "" + resources: + - hawkular-alerts + - hawkular-metrics + verbs: + - "*" - name: Configure role/user permissions - command: > - {{ openshift.common.client_binary }} adm {{item}} - --config={{manage_iq_tmp_conf}} - with_items: "{{manage_iq_tasks}}" - register: osmiq_perm_task - failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0" - changed_when: osmiq_perm_task.rc == 0 - # AUDIT:changed_when_note: Checking the return code is insufficient - # here. We really need to compare the current role/user permissions - # with their expected state. I think we may have a module for this? 
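Note: the refactor in this hunk answers the removed AUDIT comment; the raw `oc adm policy ...` and `echo ... | oc create -f -` invocations are replaced with the repo's `oc_project`, `oc_clusterrole` and `oc_adm_policy_user` modules, which compare the current object with the desired state before changing anything, so the `failed_when: "'already exists' not in ..."` guards and the temporary copy of admin.kubeconfig are no longer needed. As a point of reference, the `rules` passed to `oc_clusterrole` above describe essentially the same object as the inline manifest dropped from vars/main.yml below; a sketch of that equivalent ClusterRole, assembled from this diff only to make the module/manifest mapping explicit:

```
# Equivalent object to the new oc_clusterrole task parameters (name and rules
# taken from this diff); illustrative, not an additional resource to create.
apiVersion: v1
kind: ClusterRole
metadata:
  name: management-infra-admin
rules:
- apiGroups: [""]
  resources: ["pods/proxy"]
  verbs: ["*"]
```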
- + oc_adm_policy_user: + namespace: management-infra + resource_name: "{{ item.resource_name }}" + resource_kind: "{{ item.resource_kind }}" + user: "{{ item.user }}" + with_items: "{{ manage_iq_tasks }}" - name: Configure 3_2 role/user permissions - command: > - {{ openshift.common.client_binary }} adm {{item}} - --config={{manage_iq_tmp_conf}} + oc_adm_policy_user: + namespace: management-infra + resource_name: "{{ item.resource_name }}" + resource_kind: "{{ item.resource_kind }}" + user: "{{ item.user }}" with_items: "{{manage_iq_openshift_3_2_tasks}}" - register: osmiq_perm_3_2_task - failed_when: osmiq_perm_3_2_task.rc != 0 - changed_when: osmiq_perm_3_2_task.rc == 0 when: openshift.common.version_gte_3_2_or_1_2 | bool - -- name: Clean temporary configuration file - file: path={{manage_iq_tmp_conf}} state=absent diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 9936bb126..15d667628 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -1,41 +1,31 @@ --- -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -manageiq_cluster_role: - apiVersion: v1 - kind: ClusterRole - metadata: - name: management-infra-admin - rules: - - resources: - - pods/proxy - verbs: - - '*' - -manageiq_metrics_admin_clusterrole: - apiVersion: v1 - kind: ClusterRole - metadata: - name: hawkular-metrics-admin - rules: - - apiGroups: - - "" - resources: - - hawkular-metrics - - hawkular-alerts - verbs: - - '*' - -manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig - manage_iq_tasks: -- policy add-role-to-user -n management-infra admin -z management-admin -- policy add-role-to-user -n management-infra management-infra-admin -z management-admin -- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin -- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin -- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin -- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin -- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin -- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin +- resource_kind: role + resource_name: admin + user: management-admin +- resource_kind: role + resource_name: management-infra-admin + user: management-admin +- resource_kind: cluster-role + resource_name: cluster-reader + user: system:serviceaccount:management-infra:management-admin +- resource_kind: scc + resource_name: privileged + user: system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role + resource_name: system:image-puller + user: system:serviceaccount:management-infra:inspector-admin +- resource_kind: scc + resource_name: privileged + user: system:serviceaccount:management-infra:inspector-admin +- resource_kind: cluster-role + resource_name: self-provisioner + user: system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role + resource_name: hawkular-metrics-admin + user: system:serviceaccount:management-infra:management-admin manage_iq_openshift_3_2_tasks: -- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role + resource_name: system:image-auditor + user: 
system:serviceaccount:management-infra:management-admin diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 98e0da1a2..5522fef26 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -194,7 +194,7 @@ state: stopped when: openshift_master_ha | bool register: task_result - failed_when: "task_result|failed and 'could not' not in task_result.msg|lower" + failed_when: task_result|failed and 'could not' not in task_result.msg|lower - set_fact: master_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 506c8b129..58fabddeb 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -90,6 +90,7 @@ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api line: "{{ item }}" with_items: "{{ master_api_aws.stdout_lines | default([]) }}" + no_log: True - name: Preserve Master Controllers Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index d4c9a96ca..2617efaf1 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -64,10 +64,10 @@ --signer-key={{ openshift_ca_key }} --signer-serial={{ openshift_ca_serial }} --overwrite=false + when: item != openshift_ca_host with_items: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) - | difference([openshift_ca_host])}}" + | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" delegate_to: "{{ openshift_ca_host }}" run_once: true @@ -94,8 +94,8 @@ creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig" with_items: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) - | difference([openshift_ca_host])}}" + | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" + when: item != openshift_ca_host delegate_to: "{{ openshift_ca_host }}" run_once: true diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index e570392ff..65f85066e 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Custom filters for use in openshift-master ''' diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py index 7f7bc4316..b50d6d9db 100644 --- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py +++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py @@ -40,7 +40,7 @@ class LookupModule(LookupBase): # pylint: disable=line-too-long raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") if deployment_type == 
'origin': - if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']: + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']: raise AnsibleError("Unknown short_version %s" % short_version) elif deployment_type == 'openshift-enterprise': if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']: @@ -49,7 +49,7 @@ class LookupModule(LookupBase): raise AnsibleError("Unknown deployment_type %s" % deployment_type) if deployment_type == 'origin': - # convert short_version to enterpise short_version + # convert short_version to enterprise short_version short_version = re.sub('^1.', '3.', short_version) if short_version == 'latest': diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py index 66e6ecea3..a66cb3c88 100644 --- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py +++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py @@ -41,7 +41,7 @@ class LookupModule(LookupBase): raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") if deployment_type == 'origin': - if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']: + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']: raise AnsibleError("Unknown short_version %s" % short_version) elif deployment_type == 'openshift-enterprise': if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']: diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 6f8f09b22..f048e0aef 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -128,10 +128,10 @@ - name: Test if scheduler config is readable fail: msg: "Unknown scheduler config apiVersion {{ openshift_master_scheduler_config.apiVersion }}" - when: "{{ openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' }}" + when: openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' - name: Set current scheduler predicates and priorities set_fact: openshift_master_scheduler_current_predicates: "{{ openshift_master_scheduler_current_config.predicates }}" openshift_master_scheduler_current_priorities: "{{ openshift_master_scheduler_current_config.priorities }}" - when: "{{ scheduler_config_stat.stat.exists }}" + when: scheduler_config_stat.stat.exists diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py index 1fab84c71..4a28fb8f8 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py +++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py @@ -55,6 +55,8 @@ DEFAULT_PREDICATES_1_5 = [ {'name': 'CheckNodeDiskPressure'}, ] +DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5 + REGION_PREDICATE = { 'name': 'Region', 'argument': { @@ -75,9 +77,8 @@ TEST_VARS = [ ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4), ('1.5', 'origin', DEFAULT_PREDICATES_1_5), ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5), - ('1.6', 'origin', DEFAULT_PREDICATES_1_5), - ('3.6', 'origin', DEFAULT_PREDICATES_1_5), - ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5), + ('3.6', 
'origin', DEFAULT_PREDICATES_3_6), + ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6), ] diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py index 1098f9391..97ef2387e 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py +++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py @@ -42,6 +42,8 @@ DEFAULT_PRIORITIES_1_5 = [ {'name': 'TaintTolerationPriority', 'weight': 1} ] +DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5 + ZONE_PRIORITY = { 'name': 'Zone', 'argument': { @@ -63,9 +65,8 @@ TEST_VARS = [ ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4), ('1.5', 'origin', DEFAULT_PRIORITIES_1_5), ('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5), - ('1.6', 'origin', DEFAULT_PRIORITIES_1_5), - ('3.6', 'origin', DEFAULT_PRIORITIES_1_5), - ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5), + ('3.6', 'origin', DEFAULT_PRIORITIES_3_6), + ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6), ] diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index ffb812271..69c5a1663 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml @@ -4,6 +4,15 @@ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) notify: Verify API Server +- name: restart master api + systemd: name={{ openshift.common.service_type }}-master-api state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + notify: Verify API Server + +- name: restart master controllers + systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. 
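Note: a large number of hunks in this change make the same mechanical cleanup: `when:`, `failed_when:` and `changed_when:` expressions lose their surrounding quotes and `{{ ... }}` delimiters. These fields are already evaluated as raw Jinja2 expressions, so the wrapped form is redundant, and recent Ansible releases emit a "should not include jinja2 templating delimiters" warning for it. A minimal sketch of the idiom, using an illustrative registered variable:

```
# Redundant form: the conditional is itself a Jinja2 expression, so wrapping
# it in quotes and {{ }} only triggers the templating-delimiters warning.
- debug: msg="scaling up"
  when: "{{ existing_rc.stdout_lines | length > 0 }}"

# Preferred form: the bare expression evaluates identically, without the warning.
- debug: msg="scaling up"
  when: existing_rc.stdout_lines | length > 0
```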
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index 07b7eca33..fb4fe2f03 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -14,20 +14,22 @@ changed_when: no - name: generate password for hawkular metrics - local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}" + local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}" with_items: - hawkular-metrics +- local_action: slurp src="{{ local_tmp.stdout }}/hawkular-metrics.pwd" + register: hawkular_metrics_pwd + no_log: true + - name: generate htpasswd file for hawkular metrics - local_action: > - shell htpasswd -ci - '{{ local_tmp.stdout }}/hawkular-metrics.htpasswd' hawkular - < '{{ local_tmp.stdout }}/hawkular-metrics.pwd' + local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}" + no_log: true - name: copy local generated passwords to target copy: - src: "{{local_tmp.stdout}}/{{item}}" - dest: "{{mktemp.stdout}}/{{item}}" + src: "{{ local_tmp.stdout }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - hawkular-metrics.pwd - hawkular-metrics.htpasswd diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml deleted file mode 100644 index ced2df1d0..000000000 --- a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- name: generate heapster key/cert - command: > - {{ openshift.common.admin_binary }} ca create-server-cert - --config={{ mktemp.stdout }}/admin.kubeconfig - --key='{{ mktemp.stdout }}/heapster.key' - --cert='{{ mktemp.stdout }}/heapster.cert' - --hostnames=heapster - --signer-cert='{{ mktemp.stdout }}/ca.crt' - --signer-key='{{ mktemp.stdout }}/ca.key' - --signer-serial='{{ mktemp.stdout }}/ca.serial.txt' - -- when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines" - block: - - name: read files for the heapster secret - slurp: src={{ item }} - register: heapster_secret - with_items: - - "{{ mktemp.stdout }}/heapster.cert" - - "{{ mktemp.stdout }}/heapster.key" - - "{{ client_ca }}" - vars: - custom_ca: "{{ mktemp.stdout }}/heapster_client_ca.crt" - default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}" - - name: generate heapster secret template - template: - src: secret.j2 - dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml" - force: no - vars: - name: heapster-secrets - labels: - metrics-infra: heapster - data: - heapster.cert: "{{ heapster_secret.results[0].content }}" - heapster.key: "{{ heapster_secret.results[1].content }}" - heapster.client-ca: "{{ heapster_secret.results[2].content }}" - heapster.allowed-users: > - {{ openshift_metrics_heapster_allowed_users|b64encode }} diff --git a/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml new file mode 100644 index 000000000..e81d90ae7 --- /dev/null +++ b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml @@ -0,0 +1,14 @@ +--- +- name: generate heapster secret template + template: + src: secret.j2 + dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml" + 
force: no + vars: + name: heapster-secrets + labels: + metrics-infra: heapster + data: + heapster.allowed-users: > + {{ openshift_metrics_heapster_allowed_users|b64encode }} + when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines" diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index a467c1a51..3b4e8560f 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -23,7 +23,7 @@ changed_when: false - set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics" - when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''" + when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == '' - name: generate hawkular-cassandra persistent volume claims template: diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml index 8d27c4930..0eb852d91 100644 --- a/roles/openshift_metrics/tasks/install_heapster.yaml +++ b/roles/openshift_metrics/tasks/install_heapster.yaml @@ -22,7 +22,7 @@ with_items: - hawkular-metrics-certs - hawkular-metrics-account - when: "not {{ openshift_metrics_heapster_standalone | bool }}" + when: not openshift_metrics_heapster_standalone | bool - name: Generating serviceaccount for heapster template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml @@ -41,6 +41,8 @@ - {port: 80, targetPort: http-endpoint} selector: name: "{{obj_name}}" + annotations: + service.alpha.openshift.io/serving-cert-secret-name: heapster-certs labels: metrics-infra: "{{obj_name}}" name: "{{obj_name}}" @@ -64,4 +66,4 @@ namespace: "{{ openshift_metrics_project }}" changed_when: no -- include: generate_heapster_certificates.yaml +- include: generate_heapster_secrets.yaml diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index ffe6f63a2..74eb56713 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -10,11 +10,11 @@ - cassandra loop_control: loop_var: include_file - when: "not {{ openshift_metrics_heapster_standalone | bool }}" + when: not openshift_metrics_heapster_standalone | bool - name: Install Heapster Standalone include: install_heapster.yaml - when: "{{ openshift_metrics_heapster_standalone | bool }}" + when: openshift_metrics_heapster_standalone | bool - find: paths={{ mktemp.stdout }}/templates patterns=*.yaml register: object_def_files @@ -48,7 +48,7 @@ - name: Scaling down cluster to recognize changes include: stop_metrics.yaml - when: "{{ existing_metrics_rc.stdout_lines | length > 0 }}" + when: existing_metrics_rc.stdout_lines | length > 0 - name: Scaling up cluster include: start_metrics.yaml diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index c8d222c60..e8b7bea5c 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -19,7 +19,7 @@ - name: Create temp directory for all our templates file: path={{mktemp.stdout}}/templates state=directory mode=0755 changed_when: False - when: "{{ openshift_metrics_install_metrics | bool }}" + when: openshift_metrics_install_metrics | bool - name: Create temp directory local on control node local_action: command mktemp -d diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml 
b/roles/openshift_metrics/tasks/start_metrics.yaml index b5a1c8f06..2037e8dc3 100644 --- a/roles/openshift_metrics/tasks/start_metrics.yaml +++ b/roles/openshift_metrics/tasks/start_metrics.yaml @@ -20,7 +20,7 @@ loop_control: loop_var: object when: metrics_cassandra_rc is defined - changed_when: "{{metrics_cassandra_rc | length > 0 }}" + changed_when: metrics_cassandra_rc | length > 0 - command: > {{openshift.common.client_binary}} @@ -42,7 +42,7 @@ with_items: "{{metrics_metrics_rc.stdout_lines}}" loop_control: loop_var: object - changed_when: "{{metrics_metrics_rc | length > 0 }}" + changed_when: metrics_metrics_rc | length > 0 - command: > {{openshift.common.client_binary}} diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml index f69bb0f11..9a2ce9267 100644 --- a/roles/openshift_metrics/tasks/stop_metrics.yaml +++ b/roles/openshift_metrics/tasks/stop_metrics.yaml @@ -41,7 +41,7 @@ with_items: "{{metrics_hawkular_rc.stdout_lines}}" loop_control: loop_var: object - changed_when: "{{metrics_hawkular_rc | length > 0 }}" + changed_when: metrics_hawkular_rc | length > 0 - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig @@ -63,4 +63,4 @@ loop_control: loop_var: object when: metrics_cassandra_rc is defined - changed_when: "{{metrics_cassandra_rc | length > 0 }}" + changed_when: metrics_cassandra_rc | length > 0 diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 8a6be6237..9a5d52eb6 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -8,7 +8,7 @@ delete --ignore-not-found --selector=metrics-infra all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings register: delete_metrics - changed_when: "delete_metrics.stdout != 'No resources found'" + changed_when: delete_metrics.stdout != 'No resources found' - name: remove rolebindings command: > @@ -16,4 +16,4 @@ delete --ignore-not-found rolebinding/hawkular-view clusterrolebinding/heapster-cluster-reader - changed_when: "delete_metrics.stdout != 'No resources found'" + changed_when: delete_metrics.stdout != 'No resources found' diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml index 20fc45fd4..be1e3c3a0 100644 --- a/roles/openshift_metrics/tasks/update_master_config.yaml +++ b/roles/openshift_metrics/tasks/update_master_config.yaml @@ -4,6 +4,9 @@ dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: assetConfig.metricsPublicURL yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" - notify: restart master + notify: + - restart master + - restart master api + - restart master controllers tags: - - update_master_config + - update_master_config diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2 index f01ccfd58..ab998c2fb 100644 --- a/roles/openshift_metrics/templates/heapster.j2 +++ b/roles/openshift_metrics/templates/heapster.j2 @@ -34,9 +34,9 @@ spec: - "heapster-wrapper.sh" - "--wrapper.allowed_users_file=/secrets/heapster.allowed-users" - "--source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250" - - "--tls_cert=/secrets/heapster.cert" - - "--tls_key=/secrets/heapster.key" - - "--tls_client_ca=/secrets/heapster.client-ca" + - 
"--tls_cert=/heapster-certs/tls.crt" + - "--tls_key=/heapster-certs/tls.key" + - "--tls_client_ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" - "--allowed_users=%allowed_users%" - "--metric_resolution={{openshift_metrics_resolution}}" {% if not openshift_metrics_heapster_standalone %} @@ -80,6 +80,8 @@ spec: volumeMounts: - name: heapster-secrets mountPath: "/secrets" + - name: heapster-certs + mountPath: "/heapster-certs" {% if not openshift_metrics_heapster_standalone %} - name: hawkular-metrics-certs mountPath: "/hawkular-metrics-certs" @@ -94,6 +96,9 @@ spec: - name: heapster-secrets secret: secretName: heapster-secrets + - name: heapster-certs + secret: + secretName: heapster-certs {% if not openshift_metrics_heapster_standalone %} - name: hawkular-metrics-certs secret: diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2 index 8df89127b..ce0bc2eec 100644 --- a/roles/openshift_metrics/templates/service.j2 +++ b/roles/openshift_metrics/templates/service.j2 @@ -2,6 +2,12 @@ apiVersion: "v1" kind: "Service" metadata: name: "{{obj_name}}" +{% if annotations is defined%} + annotations: +{% for key, value in annotations.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} {% if labels is defined%} labels: {% for key, value in labels.iteritems() %} diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 59003bbf9..656874f56 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -38,28 +38,33 @@ - name: Check for swap usage command: grep "^[^#].*swap" /etc/fstab # grep: match any lines which don't begin with '#' and contain 'swap' - # command: swapon --summary - # Alternate option, however if swap entries are in fstab, swap will be - # enabled at boot. Grepping fstab should catch a condition when swap was - # disabled, but the fstab entries were not removed. changed_when: false failed_when: false register: swap_result - # Disable Swap Block +# Disable Swap Block - block: - name: Disable swap command: swapoff --all - name: Remove swap entries from /etc/fstab + replace: + dest: /etc/fstab + regexp: '(^[^#].*swap.*)' + replace: '# \1' + backup: yes + + - name: Add notice about disabling swap lineinfile: dest: /etc/fstab - regexp: 'swap' - state: absent + line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' + state: present - when: swap_result.stdout_lines | length > 0 - # End Disable Swap Block + when: + - swap_result.stdout_lines | length > 0 + - openshift_disable_swap | default(true) | bool +# End Disable Swap Block # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. 
@@ -142,7 +147,7 @@ - regex: '^AWS_SECRET_ACCESS_KEY=' line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" no_log: True - when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined" + when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined notify: - restart node diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml index 1aa826c09..502f80434 100644 --- a/roles/openshift_node_certificates/handlers/main.yml +++ b/roles/openshift_node_certificates/handlers/main.yml @@ -6,6 +6,6 @@ - name: restart docker after updating ca trust systemd: - name: docker + name: "{{ openshift.docker.service_name }}" state: restarted when: not openshift_certificates_redeploy | default(false) | bool diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml index e91891ca9..416cf605a 100644 --- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml +++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml @@ -6,20 +6,6 @@ # - docker_version # - skip_docker_restart -# We need docker service up to remove all the images, but these services will keep -# trying to re-start and thus re-pull the images we're trying to delete. -- name: Stop containerized services - service: name={{ item }} state=stopped - with_items: - - "{{ openshift.common.service_type }}-master" - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" - - etcd_container - - openvswitch - failed_when: false - when: openshift.common.is_containerized | bool - - name: Check Docker image count shell: "docker images -aq | wc -l" register: docker_image_count @@ -45,5 +31,4 @@ - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present -- include: restart.yml - when: not skip_docker_restart | default(False) | bool +# starting docker happens back in ../main.yml where it calls ../restart.yml diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index 01bd3bf38..94c97d0a5 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -9,6 +9,28 @@ # - openshift_release # tasks file for openshift_node_upgrade + +- name: Stop node and openvswitch services + service: + name: "{{ item }}" + state: stopped + with_items: + - "{{ openshift.common.service_type }}-node" + - openvswitch + failed_when: false + +- name: Stop additional containerized services + service: + name: "{{ item }}" + state: stopped + with_items: + - "{{ openshift.common.service_type }}-master" + - "{{ openshift.common.service_type }}-master-controllers" + - "{{ openshift.common.service_type }}-master-api" + - etcd_container + failed_when: false + when: openshift.common.is_containerized | bool + - include: docker/upgrade.yml vars: # We will restart Docker ourselves after everything is ready: @@ -16,7 +38,6 @@ when: - l_docker_upgrade is defined - l_docker_upgrade | bool - - not openshift.common.is_containerized | bool - include: "{{ node_config_hook }}" when: node_config_hook is defined @@ -67,16 +88,6 @@ state: latest when: not 
openshift.common.is_containerized | bool -- name: Restart openvswitch - systemd: - name: openvswitch - state: started - when: - - not openshift.common.is_containerized | bool - -# Mandatory Docker restart, ensure all containerized services are running: -- include: docker/restart.yml - - name: Update oreg value yedit: src: "{{ openshift.common.config_base }}/node/node-config.yaml" @@ -88,10 +99,6 @@ - name: Check for swap usage command: grep "^[^#].*swap" /etc/fstab # grep: match any lines which don't begin with '#' and contain 'swap' - # command: swapon --summary - # Alternate option, however if swap entries are in fstab, swap will be - # enabled at boot. Grepping fstab should catch a condition when swap was - # disabled, but the fstab entries were not removed. changed_when: false failed_when: false register: swap_result @@ -103,19 +110,25 @@ command: swapoff --all - name: Remove swap entries from /etc/fstab + replace: + dest: /etc/fstab + regexp: '(^[^#].*swap.*)' + replace: '# \1' + backup: yes + + - name: Add notice about disabling swap lineinfile: dest: /etc/fstab - regexp: 'swap' - state: absent + line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' + state: present - when: swap_result.stdout_lines | length > 0 + when: + - swap_result.stdout_lines | length > 0 + - openshift_disable_swap | default(true) | bool # End Disable Swap Block -- name: Restart rpm node service - service: - name: "{{ openshift.common.service_type }}-node" - state: restarted - when: not openshift.common.is_containerized | bool +# Restart all services +- include: restart.yml - name: Wait for node to be ready oc_obj: diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml index 176fc3c0b..e576228ba 100644 --- a/roles/openshift_node_upgrade/tasks/docker/restart.yml +++ b/roles/openshift_node_upgrade/tasks/restart.yml @@ -6,13 +6,15 @@ # - openshift.master.api_port - name: Restart docker - service: name=docker state=restarted + service: + name: "{{ openshift.docker.service_name }}" + state: restarted - name: Update docker facts openshift_facts: role: docker -- name: Restart containerized services +- name: Start services service: name={{ item }} state=started with_items: - etcd_container @@ -22,7 +24,6 @@ - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" failed_when: false - when: openshift.common.is_containerized | bool - name: Wait for master API to come back online wait_for: diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index 57279c665..b53b6afa1 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -65,6 +65,6 @@ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs register: efs_output - failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr" + failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr check_mode: no when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1 diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 84a0905cc..9a9436fcb 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -40,4 +40,21 @@ - 
openshift_deployment_type == 'origin' - openshift_enable_origin_repo | default(true) | bool + # Singleton block + - when: r_osr_first_run | default(true) + block: + - name: Ensure clean repo cache in the event repos have been changed manually + debug: + msg: "First run of openshift_repos" + changed_when: true + notify: refresh cache + + - name: Set fact r_osr_first_run false + set_fact: + r_osr_first_run: false + + # Force running ALL handlers now, because we expect repo cache to be cleared + # if changes have been made. + - meta: flush_handlers + when: not ostree_booted.stat.exists diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index cf0fb94c9..7b310dbf8 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -8,10 +8,24 @@ Requirements * Ansible 2.2 +Host Groups +----------- + +The following group is expected to be populated for this role to run: + +* `[glusterfs]` + +Additionally, the following group may be specified either in addition to or +instead of the above group to deploy a GlusterFS cluster for use by a natively +hosted Docker registry: + +* `[glusterfs_registry]` + Role Variables -------------- -From this role: +This role has the following variables that control the integration of a +GlusterFS cluster into a new or existing OpenShift cluster: | Name | Default value | | |--------------------------------------------------|-------------------------|-----------------------------------------| @@ -31,6 +45,25 @@ From this role: | openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` +Each role variable also has a corresponding variable to optionally configure a +separate GlusterFS cluster for use as storage for an integrated Docker +registry. These variables start with the prefix +`openshift_storage_glusterfs_registry_` and, for the most part, default to the +values in their corresponding non-registry variables. 
The following variables +are an exception: + +| Name | Default value | | +|---------------------------------------------------|-----------------------|-----------------------------------------| +| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default' +| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters + +Additionally, this role's behavior responds to the following registry-specific +variable: + +| Name | Default value | Description | +|----------------------------------------------|---------------|------------------------------------------------------------------------------| +| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume | + Dependencies ------------ @@ -47,6 +80,7 @@ Example Playbook hosts: oo_first_master roles: - role: openshift_storage_glusterfs + when: groups.oo_glusterfs_to_config | default([]) | count > 0 ``` License diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index ade850747..ebe9ca30b 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -2,7 +2,7 @@ openshift_storage_glusterfs_timeout: 300 openshift_storage_glusterfs_namespace: 'default' openshift_storage_glusterfs_is_native: True -openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}" +openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs' openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' openshift_storage_glusterfs_wipe: False @@ -15,3 +15,22 @@ openshift_storage_glusterfs_heketi_admin_key: '' openshift_storage_glusterfs_heketi_user_key: '' openshift_storage_glusterfs_heketi_topology_load: True openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}" +openshift_storage_glusterfs_heketi_url: "{{ omit }}" + +openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}" +openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" +openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" +openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry' +openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" +openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" +openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}" +openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}" +openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}" +openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" +openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" +openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" 
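To make the registry-prefixed variables described in the README above concrete, here is a hypothetical inventory-variables sketch that overrides a few of them for a dedicated registry cluster; the values are examples only and are not part of this patch:

```yaml
# Example group_vars entries (illustrative values, not defaults from the role)
openshift_storage_glusterfs_registry_namespace: default
openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
openshift_storage_glusterfs_registry_wipe: false
# Replace an existing hosted registry volume with a GlusterFS-backed one
openshift_hosted_registry_glusterfs_swap: true
```

Any `openshift_storage_glusterfs_registry_*` variable that is not overridden falls back to its non-registry counterpart, as shown in the defaults file being modified here.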
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" +openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" +openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" +openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" +openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}" diff --git a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml index c9945be13..c9945be13 100644 --- a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml index 3f8d8f507..3f8d8f507 100644 --- a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml index c66705752..c66705752 100644 --- a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml diff --git a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml index df045c170..df045c170 100644 --- a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml new file mode 100644 index 000000000..fa5fa2cb0 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -0,0 +1,166 @@ +--- +- name: Verify target namespace exists + oc_project: + state: present + name: "{{ glusterfs_namespace }}" + when: glusterfs_is_native or glusterfs_heketi_is_native + +- include: glusterfs_deploy.yml + when: glusterfs_is_native + +- name: Make sure heketi-client is installed + package: name=heketi-client state=present + +- name: Delete pre-existing heketi resources + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: "{{ item.kind }}" + name: "{{ item.name | default(omit) }}" + selector: "{{ item.selector | default(omit) }}" + state: absent + with_items: + - kind: "template,route,service,dc,jobs,secret" + selector: "deploy-heketi" + - kind: "template,route,service,dc" + name: "heketi" + - kind: "svc,ep" + name: "heketi-storage-endpoints" + - kind: "sa" + name: "heketi-service-account" + failed_when: False + when: glusterfs_heketi_wipe + +- name: Wait for deploy-heketi pods to terminate + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=deploy-heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: glusterfs_heketi_wipe + +- name: Wait for heketi pods to terminate + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: 
"glusterfs=heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: glusterfs_heketi_wipe + +- name: Create heketi service account + oc_serviceaccount: + namespace: "{{ glusterfs_namespace }}" + name: heketi-service-account + state: present + when: glusterfs_heketi_is_native + +- name: Add heketi service account to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + resource_kind: scc + resource_name: privileged + state: present + when: glusterfs_heketi_is_native + +- name: Allow heketi service account to view/edit pods + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + resource_kind: role + resource_name: edit + state: present + when: glusterfs_heketi_is_native + +- name: Check for existing deploy-heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + register: heketi_pod + when: glusterfs_heketi_is_native + +- name: Check if need to deploy deploy-heketi + set_fact: + glusterfs_heketi_deploy_is_missing: False + when: + - "glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- name: Check for existing heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=heketi-pod" + register: heketi_pod + when: glusterfs_heketi_is_native + +- name: Check if need to deploy heketi + set_fact: + glusterfs_heketi_is_missing: False + when: + - "glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- include: heketi_deploy_part1.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_deploy_is_missing + - glusterfs_heketi_is_missing + +- name: Determine heketi URL + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: ep + selector: "glusterfs in (deploy-heketi-service, heketi-service)" + register: heketi_url + until: + - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" + - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_url is undefined + +- name: Set heketi URL + set_fact: + glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_url is undefined + +- name: Verify heketi service + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" + changed_when: False + +- name: Generate topology file + template: + src: "{{ 
openshift.common.examples_content_version }}/topology.json.j2" + dest: "{{ mktemp.stdout }}/topology.json" + when: + - glusterfs_heketi_topology_load + +- name: Load heketi topology + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" + register: topology_load + failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" + when: + - glusterfs_heketi_topology_load + +- include: heketi_deploy_part2.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_is_missing diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml new file mode 100644 index 000000000..451990240 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -0,0 +1,22 @@ +--- +- set_fact: + glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}" + glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}" + glusterfs_image: "{{ openshift_storage_glusterfs_image }}" + glusterfs_version: "{{ openshift_storage_glusterfs_version }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" + glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" + glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" + glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" + glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}" + glusterfs_nodes: "{{ g_glusterfs_hosts }}" + +- include: glusterfs_common.yml diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 26ca5eebf..579112349 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -1,44 +1,44 @@ --- - assert: - that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1" + that: "glusterfs_nodeselector.keys() | count == 1" msg: Only one GlusterFS nodeselector key pair should be provided - assert: - that: "groups.oo_glusterfs_to_config | count >= 3" + that: "glusterfs_nodes | count >= 3" msg: There must be at least three GlusterFS nodes specified - name: Delete pre-existing GlusterFS resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "template,daemonset" name: glusterfs state: absent - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe - name: Unlabel any existing GlusterFS nodes oc_label: name: "{{ item }}" kind: node state: absent - labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" with_items: "{{ groups.all }}" - when: 
openshift_storage_glusterfs_wipe + when: glusterfs_wipe - name: Delete pre-existing GlusterFS config file: path: /var/lib/glusterd state: absent delegate_to: "{{ item }}" - with_items: "{{ groups.oo_glusterfs_to_config }}" - when: openshift_storage_glusterfs_wipe + with_items: "{{ glusterfs_nodes | default([]) }}" + when: glusterfs_wipe - name: Get GlusterFS storage devices state command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}" register: devices_info delegate_to: "{{ item }}" - with_items: "{{ groups.oo_glusterfs_to_config }}" + with_items: "{{ glusterfs_nodes | default([]) }}" failed_when: False - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume. - name: Clear GlusterFS storage device contents @@ -46,12 +46,12 @@ delegate_to: "{{ item.item }}" with_items: "{{ devices_info.results }}" when: - - openshift_storage_glusterfs_wipe + - glusterfs_wipe - item.stdout_lines | count > 0 - name: Add service accounts to privileged SCC oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}" + user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" resource_kind: scc resource_name: privileged state: present @@ -64,8 +64,8 @@ name: "{{ glusterfs_host }}" kind: node state: add - labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" - with_items: "{{ groups.oo_glusterfs_to_config }}" + labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + with_items: "{{ glusterfs_nodes | default([]) }}" loop_control: loop_var: glusterfs_host @@ -76,7 +76,7 @@ - name: Create GlusterFS template oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: glusterfs state: present @@ -85,16 +85,16 @@ - name: Deploy GlusterFS pods oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "glusterfs" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}" + IMAGE_NAME: "{{ glusterfs_image }}" + IMAGE_VERSION: "{{ glusterfs_version }}" - name: Wait for GlusterFS pods oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs-node=pod" @@ -102,6 +102,6 @@ until: - "glusterfs_pods.results.results[0]['items'] | count > 0" # There must be as many pods with 'Ready' staus True as there are nodes expecting those pods - - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count" + - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 9f092d5d5..392f4b65b 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ 
b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -1,7 +1,30 @@ --- +- set_fact: + glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}" + glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}" + glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" + glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}" + glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}" + glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}" + glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}" + glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}" + glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}" + glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}" + +- include: glusterfs_common.yml + when: g_glusterfs_registry_hosts != g_glusterfs_hosts + - name: Delete pre-existing GlusterFS registry resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" name: "{{ item.name | default(omit) }}" selector: "{{ item.selector | default(omit) }}" @@ -23,7 +46,7 @@ - name: Create GlusterFS registry endpoints oc_obj: - namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + namespace: "{{ glusterfs_namespace }}" state: present kind: endpoints name: glusterfs-registry-endpoints @@ -32,7 +55,7 @@ - name: Create GlusterFS registry service oc_obj: - namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + namespace: "{{ glusterfs_namespace }}" state: present kind: service name: glusterfs-registry-endpoints @@ -40,9 +63,9 @@ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml" - name: Check if GlusterFS registry volume exists - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list" register: registry_volume - name: Create GlusterFS registry volume - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" - when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ 
openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" + when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml index 76ae1db75..c14fcfb15 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -8,7 +8,7 @@ - name: Create deploy-heketi resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: deploy-heketi state: present @@ -17,18 +17,18 @@ - name: Deploy deploy-heketi pod oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "deploy-heketi" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" - HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" - HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + IMAGE_NAME: "{{ glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" - name: Wait for deploy-heketi pod oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" @@ -38,4 +38,4 @@ # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 84b85e95d..64410a9ab 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@ --- - name: Create heketi DB volume - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" register: setup_storage failed_when: False @@ -13,12 +13,12 @@ # Need `command` here because heketi-storage.json contains multiple objects. 
- name: Copy heketi DB to GlusterFS volume - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}" - when: "setup_storage.rc == 0" + command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" + when: setup_storage.rc == 0 - name: Wait for copy job to finish oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: job state: list name: "heketi-storage-copy-job" @@ -28,17 +28,17 @@ # Pod's 'Complete' status must be True - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" failed_when: - "'results' in heketi_job.results" - "heketi_job.results.results | count > 0" # Fail when pod's 'Failed' status is True - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1" - when: "setup_storage.rc == 0" + when: setup_storage.rc == 0 - name: Delete deploy resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" name: "{{ item.name | default(omit) }}" selector: "{{ item.selector | default(omit) }}" @@ -55,7 +55,7 @@ - name: Create heketi resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: heketi state: present @@ -64,18 +64,18 @@ - name: Deploy heketi pod oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "heketi" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" - HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" - HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + IMAGE_NAME: "{{ glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" - name: Wait for heketi pod oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs=heketi-pod" @@ -85,11 +85,11 @@ # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" - name: Determine heketi URL oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" state: list kind: ep selector: "glusterfs=heketi-service" @@ -98,12 +98,12 @@ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | 
int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" - name: Set heketi URL set_fact: - openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" - name: Verify heketi service - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" changed_when: False diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml index 265a3cc6e..ebd8db453 100644 --- a/roles/openshift_storage_glusterfs/tasks/main.yml +++ b/roles/openshift_storage_glusterfs/tasks/main.yml @@ -5,174 +5,14 @@ changed_when: False check_mode: no -- name: Verify target namespace exists - oc_project: - state: present - name: "{{ openshift_storage_glusterfs_namespace }}" - when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native - -- include: glusterfs_deploy.yml - when: openshift_storage_glusterfs_is_native - -- name: Make sure heketi-client is installed - package: name=heketi-client state=present - -- name: Delete pre-existing heketi resources - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: "{{ item.kind }}" - name: "{{ item.name | default(omit) }}" - selector: "{{ item.selector | default(omit) }}" - state: absent - with_items: - - kind: "template,route,service,jobs,dc,secret" - selector: "deploy-heketi" - - kind: "template,route,dc,service" - name: "heketi" - - kind: "svc,ep" - name: "heketi-storage-endpoints" - - kind: "sa" - name: "heketi-service-account" - failed_when: False - when: openshift_storage_glusterfs_heketi_wipe - -- name: Wait for deploy-heketi pods to terminate - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: pod - state: list - selector: "glusterfs=deploy-heketi-pod" - register: heketi_pod - until: "heketi_pod.results.results[0]['items'] | count == 0" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: openshift_storage_glusterfs_heketi_wipe - -- name: Wait for heketi pods to terminate - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: pod - state: list - selector: "glusterfs=heketi-pod" - register: heketi_pod - until: "heketi_pod.results.results[0]['items'] | count == 0" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: openshift_storage_glusterfs_heketi_wipe - -- name: Create heketi service account - oc_serviceaccount: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - name: heketi-service-account - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Add heketi service account to privileged SCC - oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" - resource_kind: scc - resource_name: privileged - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Allow heketi service account to view/edit pods - oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace 
}}:heketi-service-account" - resource_kind: role - resource_name: edit - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check for existing deploy-heketi pod - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: pod - selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" - register: heketi_pod - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check if need to deploy deploy-heketi - set_fact: - openshift_storage_glusterfs_heketi_deploy_is_missing: False - when: - - "openshift_storage_glusterfs_heketi_is_native" - - "heketi_pod.results.results[0]['items'] | count > 0" - # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - -- name: Check for existing heketi pod - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: pod - selector: "glusterfs=heketi-pod" - register: heketi_pod - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check if need to deploy heketi - set_fact: - openshift_storage_glusterfs_heketi_is_missing: False +- include: glusterfs_config.yml when: - - "openshift_storage_glusterfs_heketi_is_native" - - "heketi_pod.results.results[0]['items'] | count > 0" - # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - -- include: heketi_deploy_part1.yml - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_deploy_is_missing - - openshift_storage_glusterfs_heketi_is_missing - -- name: Determine heketi URL - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: ep - selector: "glusterfs in (deploy-heketi-service, heketi-service)" - register: heketi_url - until: - - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_url is undefined - -- name: Set heketi URL - set_fact: - openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_url is undefined - -- name: Verify heketi service - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" - changed_when: False - -- name: Generate topology file - template: - src: "{{ openshift.common.examples_content_version }}/topology.json.j2" - dest: "{{ mktemp.stdout }}/topology.json" - when: - - openshift_storage_glusterfs_is_native - - openshift_storage_glusterfs_heketi_topology_load - -- name: Load heketi topology - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ 
openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" - register: topology_load - failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" - when: - - openshift_storage_glusterfs_is_native - - openshift_storage_glusterfs_heketi_topology_load - -- include: heketi_deploy_part2.yml - when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing + - g_glusterfs_hosts | default([]) | count > 0 - include: glusterfs_registry.yml - when: "openshift.hosted.registry.storage.kind == 'glusterfs'" + when: + - g_glusterfs_registry_hosts | default([]) | count > 0 + - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap" - name: Delete temp directory file: diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 index d72d085c9..605627ab5 100644 --- a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 @@ -4,7 +4,7 @@ metadata: name: glusterfs-registry-endpoints subsets: - addresses: -{% for node in groups.oo_glusterfs_to_config %} +{% for node in glusterfs_nodes %} - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} {% endfor %} ports: diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 index eb5b4544f..33d8f9b36 100644 --- a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 @@ -1,7 +1,7 @@ { "clusters": [ {%- set clusters = {} -%} -{%- for node in groups.oo_glusterfs_to_config -%} +{%- for node in glusterfs_nodes -%} {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%} {%- if cluster in clusters -%} {%- set _dummy = clusters[cluster].append(node) -%} diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index fa9b20e92..d8b1158a6 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -86,8 +86,16 @@ include: set_version_rpm.yml when: not is_containerized | bool -- name: Set openshift_version for containerized installation - include: set_version_containerized.yml +- block: + - name: Set openshift_version for containerized installation + include: set_version_containerized.yml + - name: Determine openshift rpm version + include: rpm_version.yml + - name: Fail if rpm version and docker image version are different + fail: + msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}" + # Both versions have the same string representation + when: openshift_rpm_version != openshift_version when: is_containerized | bool # Warn if the user has provided an openshift_image_tag but is not doing a containerized install diff --git a/roles/openshift_version/tasks/rpm_version.yml b/roles/openshift_version/tasks/rpm_version.yml new file mode 100644 index 000000000..bd5e94b43 --- /dev/null +++ b/roles/openshift_version/tasks/rpm_version.yml @@ -0,0 +1,44 @@ +--- +# input_variables: +# - repoquery_cmd +# - openshift.common.service_type +# output_variables: +# - openshift_rpm_version + +# if {{ openshift.common.service_type}}-excluder is 
enabled, +# the repoquery for {{ openshift.common.service_type}} will not work. +# Thus, create a temporary yum,conf file where exclude= is set to an empty list +- name: Create temporary yum.conf file + command: mktemp -d /tmp/yum.conf.XXXXXX + register: yum_conf_temp_file_result + +- set_fact: + yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf" + +- name: Copy yum.conf into the temporary file + copy: + src: /etc/yum.conf + dest: "{{ yum_conf_temp_file }}" + remote_src: True + +- name: Clear the exclude= list in the temporary yum.conf + lineinfile: + # since ansible 2.3 s/dest/path + dest: "{{ yum_conf_temp_file }}" + regexp: '^exclude=' + line: 'exclude=' + +- name: Gather common package version + command: > + {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}" + register: common_version + failed_when: false + changed_when: false + +- name: Delete the temporary yum.conf + file: + path: "{{ yum_conf_temp_file_result.stdout }}" + state: absent + +- set_fact: + openshift_rpm_version: "{{ common_version.stdout | default('0.0', True) }}" diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml index c7604af1a..3cf78068b 100644 --- a/roles/openshift_version/tasks/set_version_rpm.yml +++ b/roles/openshift_version/tasks/set_version_rpm.yml @@ -7,42 +7,8 @@ - openshift_pkg_version is defined - openshift_version is not defined -# if {{ openshift.common.service_type}}-excluder is enabled, -# the repoquery for {{ openshift.common.service_type}} will not work. -# Thus, create a temporary yum,conf file where exclude= is set to an empty list -- name: Create temporary yum.conf file - command: mktemp -d /tmp/yum.conf.XXXXXX - register: yum_conf_temp_file_result - -- set_fact: - yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf" - -- name: Copy yum.conf into the temporary file - copy: - src: /etc/yum.conf - dest: "{{ yum_conf_temp_file }}" - remote_src: True - -- name: Clear the exclude= list in the temporary yum.conf - lineinfile: - # since ansible 2.3 s/dest/path - dest: "{{ yum_conf_temp_file }}" - regexp: '^exclude=' - line: 'exclude=' - -- name: Gather common package version - command: > - {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}" - register: common_version - failed_when: false - changed_when: false - when: openshift_version is not defined - -- name: Delete the temporary yum.conf - file: - path: "{{ yum_conf_temp_file_result.stdout }}" - state: absent - -- set_fact: - openshift_version: "{{ common_version.stdout | default('0.0', True) }}" +- block: + - include: rpm_version.yml + - set_fact: + openshift_version: "{{ openshift_rpm_version }}" when: openshift_version is not defined diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 8d4878fa7..aeee3ede8 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 # pylint: disable=fixme, missing-docstring import subprocess diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index 4b2979887..509655b0c 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -14,7 +14,7 @@ - 
iptables - ip6tables register: task_result - failed_when: "task_result|failed and 'could not' not in task_result.msg|lower" + failed_when: task_result|failed and 'could not' not in task_result.msg|lower - name: Wait 10 seconds after disabling iptables pause: diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 38ea2477c..55f2fc471 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -7,7 +7,7 @@ enabled: no masked: yes register: task_result - failed_when: "task_result|failed and 'could not' not in task_result.msg|lower" + failed_when: task_result|failed and 'could not' not in task_result.msg|lower - name: Wait 10 seconds after disabling firewalld pause: diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml index 41673ee40..ea0c42150 100644 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -7,7 +7,7 @@ when: deployment_type == 'enterprise' - set_fact: - default_ose_version: '3.4' + default_ose_version: '3.5' when: deployment_type in ['atomic-enterprise', 'openshift-enterprise'] - set_fact: @@ -16,10 +16,13 @@ - fail: msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type" when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or - ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4'] ) + ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5'] ) - name: Enable RHEL repositories command: subscription-manager repos \ --enable="rhel-7-server-rpms" \ --enable="rhel-7-server-extras-rpms" \ - --enable="rhel-7-server-ose-{{ ose_version }}-rpms" + --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \ + --enable="rhel-7-fast-datapath-rpms" + register: subscribe_repos + until: subscribe_repos | succeeded diff --git a/test/integration/README.md b/test/integration/README.md new file mode 100644 index 000000000..948e44c50 --- /dev/null +++ b/test/integration/README.md @@ -0,0 +1,39 @@ +# Integration tests + +Integration tests exercise the OpenShift Ansible playbooks by running them +against an inventory with Docker containers as hosts. + +## Requirements + +The tests assume that: + +* docker is running on localhost and the present user has access to use it. +* golang is installed and the go binary is in PATH. +* python and tox are installed. + +## Building images + +The tests rely on images built in the local docker index. You can build them +from the repository root with: + +``` +./test/integration/build-images.sh +``` + +Use the `--help` option to view available options. + +## Running the tests + +From the repository root, run the integration tests with: + +``` +./test/integration/run-tests.sh +``` + +Use the `--help` option to view available options. + +You can also run tests more directly, for example to run a specific check: + +``` +go test ./test/integration/... -run TestPackageUpdateDepMissing +``` diff --git a/test/integration/build-images.sh b/test/integration/build-images.sh new file mode 100755 index 000000000..74a55fa51 --- /dev/null +++ b/test/integration/build-images.sh @@ -0,0 +1,101 @@ +#!/bin/bash + +# This is intended to run either locally (in which case a push is not +# necessary) or in a CI job (where the results should be pushed to a +# registry for use in later CI test jobs). 
Images are tagged locally with +# both the base name (e.g. "test-target-base") and with the prefix given; +# then only the prefixed name is pushed if --push is specified, assuming +# any necessary credentials are available for the push. The same prefix +# can then be used for the testing script. By default a local (non-registry) +# prefix is used and no push can occur. To push to e.g. dockerhub: +# +# ./build-images.sh --push --prefix=docker.io/openshift/ansible-integration- + +set -o errexit +set -o nounset +set -o pipefail + +STARTTIME=$(date +%s) +source_root=$(dirname "${0}") + +prefix="${PREFIX:-openshift-ansible-integration-}" +push=false +verbose=false +build_options="${DOCKER_BUILD_OPTIONS:-}" +help=false + +for args in "$@" +do + case $args in + --prefix=*) + prefix="${args#*=}" + ;; + --push) + push=true + ;; + --no-cache) + build_options="${build_options} --no-cache" + ;; + --verbose) + verbose=true + ;; + --help) + help=true + ;; + esac +done + +if [ "$help" = true ]; then + echo "Builds the docker images for openshift-ansible integration tests" + echo "and pushes them to a central registry." + echo + echo "Options: " + echo " --prefix=PREFIX" + echo " The prefix to use for the image names." + echo " default: openshift-ansible-integration-" + echo + echo " --push" + echo " If set will push the tagged image" + echo + echo " --no-cache" + echo " If set will perform the build without a cache." + echo + echo " --verbose" + echo " Enables printing of the commands as they run." + echo + echo " --help" + echo " Prints this help message" + echo + exit 0 +fi + +if [ "$verbose" = true ]; then + set -x +fi + + +declare -a build_order ; declare -A images +build_order+=( test-target-base ) ; images[test-target-base]=openshift_health_checker/builds/test-target-base +build_order+=( preflight-aos-package-checks ); images[preflight-aos-package-checks]=openshift_health_checker/builds/aos-package-checks +for image in "${build_order[@]}"; do + BUILD_STARTTIME=$(date +%s) + docker_tag=${prefix}${image} + echo + echo "--- Building component '$image' with docker tag '$docker_tag' ---" + docker build ${build_options} -t $image -t $docker_tag "$source_root/${images[$image]}" + echo + BUILD_ENDTIME=$(date +%s); echo "--- build $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---" + if [ "$push" = true ]; then + docker push $docker_tag + PUSH_ENDTIME=$(date +%s); echo "--- push $docker_tag took $(($PUSH_ENDTIME - $BUILD_ENDTIME)) seconds ---" + fi +done + +echo +echo +echo "++ Active images" +docker images | grep ${prefix} | sort +echo + + +ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret" diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile new file mode 100644 index 000000000..8542029f6 --- /dev/null +++ b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile @@ -0,0 +1,30 @@ +FROM test-target-base + +RUN yum install -y rpm-build rpmdevtools createrepo && \ + rpmdev-setuptree && \ + mkdir -p /mnt/localrepo +ADD root / + +# we will build some RPMs that can be used to break yum update in tests. 
+RUN cd /root/rpmbuild/SOURCES && \ + mkdir break-yum-update-1.0 && \ + tar zfc foo.tgz break-yum-update-1.0 && \ + rpmbuild -bb /root/break-yum-update.spec && \ + yum install -y /root/rpmbuild/RPMS/noarch/break-yum-update-1.0-1.noarch.rpm && \ + rpmbuild -bb /root/break-yum-update-2.spec && \ + mkdir /mnt/localrepo/break-yum && \ + cp /root/rpmbuild/RPMS/noarch/break-yum-update-1.0-2.noarch.rpm /mnt/localrepo/break-yum && \ + createrepo /mnt/localrepo/break-yum + +# we'll also build some RPMs that can be used to exercise OCP package version tests. +RUN cd /root/rpmbuild/SOURCES && \ + mkdir atomic-openshift-3.2 && \ + mkdir atomic-openshift-3.3 && \ + tar zfc ose.tgz atomic-openshift-3.{2,3} && \ + rpmbuild -bb /root/ose-3.2.spec && \ + rpmbuild -bb /root/ose-3.3.spec && \ + mkdir /mnt/localrepo/ose-3.{2,3} && \ + cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.2-1.noarch.rpm /mnt/localrepo/ose-3.2 && \ + createrepo /mnt/localrepo/ose-3.2 && \ + cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.3-1.noarch.rpm /mnt/localrepo/ose-3.3 && \ + createrepo /mnt/localrepo/ose-3.3 diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo new file mode 100644 index 000000000..f5ccd2d19 --- /dev/null +++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo @@ -0,0 +1,5 @@ +[break-yum] +name=break-yum +baseurl=file:///mnt/localrepo/break-yum +enabled=0 +gpgcheck=0 diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo new file mode 100644 index 000000000..3064d6dbb --- /dev/null +++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo @@ -0,0 +1,5 @@ +[ose-3.2] +name=ose-3.2 +baseurl=file:///mnt/localrepo/ose-3.2 +enabled=0 +gpgcheck=0 diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo new file mode 100644 index 000000000..1466da476 --- /dev/null +++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo @@ -0,0 +1,5 @@ +[ose-3.3] +name=ose-3.3 +baseurl=file:///mnt/localrepo/ose-3.3 +enabled=0 +gpgcheck=0 diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec new file mode 100644 index 000000000..ebd7eb443 --- /dev/null +++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec @@ -0,0 +1,33 @@ +Name: break-yum-update +Version: 1.0 +Release: 2 +Summary: Package for breaking updates by requiring things that don't exist + +License: NA + +Requires: package-that-does-not-exist +Source0: http://example.com/foo.tgz +BuildArch: noarch + +%description +Package for breaking updates by requiring things that don't exist + + +%prep +%setup -q + + +%build + + +%install +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT + + +%files +%doc + + + +%changelog diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec 
b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec new file mode 100644 index 000000000..c40675f90 --- /dev/null +++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec @@ -0,0 +1,32 @@ +Name: break-yum-update +Version: 1.0 +Release: 1 +Summary: Package for breaking updates by requiring things that don't exist + +License: NA + +Source0: http://example.com/foo.tgz +BuildArch: noarch + +%description +Package for breaking updates by requiring things that don't exist + + +%prep +%setup -q + + +%build + + +%install +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT + + +%files +%doc + + + +%changelog diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec new file mode 100644 index 000000000..dbc9f0c8e --- /dev/null +++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec @@ -0,0 +1,44 @@ +Name: atomic-openshift +Version: 3.2 +Release: 1 +Summary: package the critical aos packages + +License: NA + +Source0: http://example.com/ose.tgz +BuildArch: noarch + +%package master +Summary: package the critical aos packages +%package node +Summary: package the critical aos packages + +%description +Package for pretending to provide AOS + +%description master +Package for pretending to provide AOS + +%description node +Package for pretending to provide AOS + +%prep +%setup -q + + +%build + + +%install +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT + + +%files +%files master +%files node +%doc + + + +%changelog diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec new file mode 100644 index 000000000..9546e8430 --- /dev/null +++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec @@ -0,0 +1,44 @@ +Name: atomic-openshift +Version: 3.3 +Release: 1 +Summary: package the critical aos packages + +License: NA + +Source0: http://example.com/ose.tgz +BuildArch: noarch + +%package master +Summary: package the critical aos packages +%package node +Summary: package the critical aos packages + +%description +Package for pretending to provide AOS + +%description master +Package for pretending to provide AOS + +%description node +Package for pretending to provide AOS + +%prep +%setup -q + + +%build + + +%install +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT + + +%files +%files master +%files node +%doc + + + +%changelog diff --git a/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile b/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile new file mode 100644 index 000000000..39b33c057 --- /dev/null +++ b/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile @@ -0,0 +1,2 @@ +FROM centos/systemd +RUN yum install -y iproute python-dbus PyYAML yum-utils diff --git a/test/integration/openshift_health_checker/common.go b/test/integration/openshift_health_checker/common.go new file mode 100644 index 000000000..a92d6861d --- /dev/null +++ b/test/integration/openshift_health_checker/common.go @@ -0,0 +1,99 @@ +package test + +import ( + "bytes" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + "testing" +) + +// A PlaybookTest executes a given Ansible playbook and checks the exit 
code and +// output contents. +type PlaybookTest struct { + // inputs + Path string + // expected outputs + ExitCode int + Output []string // zero or more strings that should be in the output +} + +// Run runs the PlaybookTest. +func (p PlaybookTest) Run(t *testing.T) { + // A PlaybookTest is intended to be run in parallel with other tests. + t.Parallel() + + cmd := exec.Command("ansible-playbook", "-i", "/dev/null", p.Path) + cmd.Env = append(os.Environ(), "ANSIBLE_FORCE_COLOR=1") + b, err := cmd.CombinedOutput() + + // Check exit code. + if (err == nil) && (p.ExitCode != 0) { + p.checkExitCode(t, 0, p.ExitCode, cmd, b) + } + if (err != nil) && (p.ExitCode == 0) { + got, ok := getExitCode(err) + if !ok { + t.Logf("unexpected error (%T): %[1]v", err) + p.logCmdAndOutput(t, cmd, b) + t.FailNow() + } + p.checkExitCode(t, got, p.ExitCode, cmd, b) + } + + // Check output contents. + var missing []string + for _, s := range p.Output { + if !bytes.Contains(b, []byte(s)) { + missing = append(missing, s) + } + } + if len(missing) > 0 { + t.Logf("missing in output: %q", missing) + p.logCmdAndOutput(t, cmd, b) + t.FailNow() + } +} + +// getExitCode returns an exit code and true if the exit code could be taken +// from err, false otherwise. +// The implementation is GOOS-specific, and currently only supports Linux. +func getExitCode(err error) (int, bool) { + exitErr, ok := err.(*exec.ExitError) + if !ok { + return -1, false + } + waitStatus, ok := exitErr.Sys().(syscall.WaitStatus) + if !ok { + return -1, false + } + return waitStatus.ExitStatus(), true +} + +// checkExitCode marks the test as failed when got is different than want. +func (p PlaybookTest) checkExitCode(t *testing.T, got, want int, cmd *exec.Cmd, output []byte) { + if got == want { + return + } + t.Logf("got exit code %v, want %v", got, want) + p.logCmdAndOutput(t, cmd, output) + t.FailNow() +} + +// logCmdAndOutput logs how to re-run a command and a summary of the output of +// its last execution for debugging. +func (p PlaybookTest) logCmdAndOutput(t *testing.T, cmd *exec.Cmd, output []byte) { + const maxLines = 10 + lines := bytes.Split(bytes.TrimRight(output, "\n"), []byte("\n")) + if len(lines) > maxLines { + lines = append([][]byte{[]byte("...")}, lines[len(lines)-maxLines:len(lines)]...) 
+ } + output = bytes.Join(lines, []byte("\n")) + dir, err := filepath.Abs(cmd.Dir) + if err != nil { + panic(err) + } + t.Logf("\n$ (cd %s && %s)\n%s", dir, strings.Join(cmd.Args, " "), output) +} diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml new file mode 100644 index 000000000..31d0d521e --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml @@ -0,0 +1,20 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + deployment_type: openshift-enterprise + +- name: Fail as required packages cannot be installed + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - action: openshift_health_check + args: + checks: [ 'package_availability' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml new file mode 100644 index 000000000..16ff41673 --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml @@ -0,0 +1,20 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + deployment_type: origin + +- name: Succeeds as Origin packages are public + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - action: openshift_health_check + args: + checks: [ 'package_availability' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml new file mode 100644 index 000000000..7b6e71f91 --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml @@ -0,0 +1,24 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + openshift_deployment_type: openshift-enterprise + openshift_release: 3.2 + +- name: Fails when a dependency required for update is missing + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - include: tasks/enable_repo.yml + vars: { repo_name: "break-yum" } + + - action: openshift_health_check + args: + checks: [ 'package_update' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml new file mode 100644 index 000000000..c2e9c3866 --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml @@ -0,0 +1,31 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + openshift_deployment_type: openshift-enterprise + openshift_release: 3.2 + +- name: Fails when a repo definition is completely broken + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - include: 
tasks/enable_repo.yml + vars: { repo_name: "break-yum" } + + - name: Break the break-yum repo + replace: + dest: /etc/yum.repos.d/break-yum.repo + backup: no + regexp: "^baseurl" + replace: "#baseurl" + + - action: openshift_health_check + args: + checks: [ 'package_update' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml new file mode 100644 index 000000000..98d41aad4 --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml @@ -0,0 +1,21 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + openshift_deployment_type: openshift-enterprise + openshift_release: 3.2 + +- name: Succeeds when nothing blocks a yum update + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - action: openshift_health_check + args: + checks: [ 'package_update' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml new file mode 100644 index 000000000..60ab9942a --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml @@ -0,0 +1,27 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + openshift_deployment_type: openshift-enterprise + openshift_release: 3.2 + +- name: Fails when repo content is not available + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - include: tasks/enable_repo.yml + vars: { repo_name: "break-yum" } + + - name: Remove the local repo entirely + file: path=/mnt/localrepo state=absent + + - action: openshift_health_check + args: + checks: [ 'package_update' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml new file mode 100644 index 000000000..cd60dee5a --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml @@ -0,0 +1,24 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + deployment_type: openshift-enterprise + openshift_release: 3.2 + +- name: Success when AOS version matches openshift_release + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - include: tasks/enable_repo.yml + vars: { repo_name: "ose-3.2" } + + - action: openshift_health_check + args: + checks: [ 'package_version' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml new file mode 100644 index 000000000..5939a1ef1 --- /dev/null +++ 
b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml @@ -0,0 +1,24 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + deployment_type: openshift-enterprise + openshift_release: 3.3 + +- name: Failure when AOS version doesn't match openshift_release + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - include: tasks/enable_repo.yml + vars: { repo_name: "ose-3.2" } + + - action: openshift_health_check + args: + checks: [ 'package_version' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml new file mode 100644 index 000000000..be0f9bc7a --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml @@ -0,0 +1,26 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + openshift_deployment_type: openshift-enterprise + +- name: Fails when multiple AOS versions are available + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - include: tasks/enable_repo.yml + vars: { repo_name: "ose-3.2" } + + - include: tasks/enable_repo.yml + vars: { repo_name: "ose-3.3" } + + - action: openshift_health_check + args: + checks: [ 'package_version' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml new file mode 100644 index 000000000..da3f6b844 --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml @@ -0,0 +1,20 @@ +--- +- include: ../../setup_container.yml + vars: + image: preflight-aos-package-checks + l_host_vars: + openshift_deployment_type: origin + +- name: Succeeds with Origin although multiple versions are available + hosts: all + roles: + - openshift_health_checker + tasks: + - block: + + - action: openshift_health_check + args: + checks: [ 'package_version' ] + + always: # destroy the container whether check passed or not + - include: ../../teardown_container.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/test/integration/openshift_health_checker/preflight/playbooks/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles +++ b/test/integration/openshift_health_checker/preflight/playbooks/roles diff --git a/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml new file mode 100644 index 000000000..aaacf205e --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml @@ -0,0 +1,9 @@ +--- +- name: Enable {{ repo_name }} repo + # believe it or not we can't use the yum_repository module for this. 
+ # https://github.com/ansible/ansible-modules-extras/issues/2384 + ini_file: + dest: /etc/yum.repos.d/{{ repo_name }}.repo + section: "{{ repo_name }}" + option: enabled + value: 1 diff --git a/test/integration/openshift_health_checker/preflight/preflight_test.go b/test/integration/openshift_health_checker/preflight/preflight_test.go new file mode 100644 index 000000000..05ddf139f --- /dev/null +++ b/test/integration/openshift_health_checker/preflight/preflight_test.go @@ -0,0 +1,105 @@ +package preflight + +import ( + "testing" + + . ".." +) + +func TestPackageUpdateDepMissing(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_update_dep_missing.yml", + ExitCode: 2, + Output: []string{ + "check \"package_update\":", + "Could not perform a yum update.", + "break-yum-update-1.0-2.noarch requires package-that-does-not-exist", + }, + }.Run(t) +} + +func TestPackageUpdateRepoBroken(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_update_repo_broken.yml", + ExitCode: 2, + Output: []string{ + "check \"package_update\":", + "Error with yum repository configuration: Cannot find a valid baseurl for repo", + }, + }.Run(t) +} + +func TestPackageUpdateRepoDisabled(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_update_repo_disabled.yml", + ExitCode: 0, + Output: []string{ + "CHECK [package_update", + }, + }.Run(t) +} + +func TestPackageUpdateRepoUnreachable(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_update_repo_unreachable.yml", + ExitCode: 2, + Output: []string{ + "check \"package_update\":", + "Error getting data from at least one yum repository", + }, + }.Run(t) +} + +func TestPackageVersionMatches(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_version_matches.yml", + ExitCode: 0, + Output: []string{ + "CHECK [package_version", + }, + }.Run(t) +} + +func TestPackageVersionMismatches(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_version_mismatches.yml", + ExitCode: 2, + Output: []string{ + "check \"package_version\":", + "Not all of the required packages are available at requested version", + }, + }.Run(t) +} + +func TestPackageVersionMultiple(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_version_multiple.yml", + ExitCode: 2, + Output: []string{ + "check \"package_version\":", + "Multiple minor versions of these packages are available", + }, + }.Run(t) +} + +func TestPackageAvailabilityMissingRequired(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_availability_missing_required.yml", + ExitCode: 2, + Output: []string{ + "check \"package_availability\":", + "Cannot install all of the necessary packages.", + "atomic-openshift", + }, + }.Run(t) +} + +func TestPackageAvailabilitySucceeds(t *testing.T) { + PlaybookTest{ + Path: "playbooks/package_availability_succeeds.yml", + ExitCode: 0, + Output: []string{ + "CHECK [package_availability", + }, + }.Run(t) +} diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml new file mode 100644 index 000000000..8793d954e --- /dev/null +++ b/test/integration/openshift_health_checker/setup_container.yml @@ -0,0 +1,45 @@ +--- +# Include this play once for each container you want to create and use as a test host. 
+# +# Optional parameters on the include are as follows: +# * scenario = unique name for the container to be started +# * image = name of the image to start in the container +# * command = command to run in the container +# * l_groups = host groups that the container should be added to +# * l_host_vars = any variables that should be added to the host + +- name: Start container for specified test host + gather_facts: no + hosts: localhost + connection: local + tasks: + + - set_fact: + # This is a little weird but if we use a var instead of a fact, + # a different random value is generated for each task. See: + # https://opensolitude.com/2015/05/27/ansible-lookups-variables-vs-facts.html + container_name: openshift_ansible_test_{{ scenario | default(100000000000000 | random) }} + + - name: start container + docker_container: + name: "{{ container_name }}" + image: "{{ lookup('env', 'IMAGE_PREFIX') | default('openshift-ansible-integration-', true) }}{{ image | default('test-target-base') }}" + command: "{{ command | default('sleep 1800') }}" + recreate: yes + # NOTE: When/if we need to run containers that are docker hosts as well: + # volumes: [ "/var/run/docker.sock:/var/run/docker.sock:z" ] + + - name: add container as host in inventory + add_host: + ansible_connection: docker + name: "{{ container_name }}" + groups: '{{ l_groups | default("masters,nodes,etcd") }}' + + # There ought to be a better way to transfer the host vars, but see: + # https://groups.google.com/forum/#!topic/Ansible-project/Jwx8RYhqxPA + - name: set host facts per test parameters + set_fact: + "{{ item.key }}": "{{ item.value }}" + delegate_facts: True + delegate_to: "{{ container_name }}" + with_dict: "{{ l_host_vars | default({}) }}" diff --git a/test/integration/openshift_health_checker/teardown_container.yml b/test/integration/openshift_health_checker/teardown_container.yml new file mode 100644 index 000000000..fe11e2617 --- /dev/null +++ b/test/integration/openshift_health_checker/teardown_container.yml @@ -0,0 +1,23 @@ +--- + +# Include this to delete the current test host container. +# +# In order to recover from test exceptions, this cleanup is expected to +# be done in an "always:" task on the same block as the test task(s). So +# it happens in a task "on" the host being tested. In order to delete the +# host's container, the task uses its own hostname (which is same as the +# container name) but delegates the docker action to localhost. + +- block: + + # so handlers don't break the test by trying to run after teardown: + - meta: flush_handlers + + always: + + - name: delete test container + delegate_to: localhost + connection: local + docker_container: + name: "{{ inventory_hostname }}" + state: absent diff --git a/test/integration/run-tests.sh b/test/integration/run-tests.sh new file mode 100755 index 000000000..680b64602 --- /dev/null +++ b/test/integration/run-tests.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# This script runs the golang integration tests in the directories underneath. +# It should be run from the same directory it is in, or in a directory above. 
+# Specify the same image prefix used (if any) with build-images.sh +# +# Example: +# ./run-tests.sh --prefix=docker.io/openshift/ansible-integration- --parallel=16 + +set -o errexit +set -o nounset +set -o pipefail + +source_root=$(dirname "${0}") + +prefix="${PREFIX:-openshift-ansible-integration-}" +gotest_options="${GOTEST_OPTIONS:--v}" +push=false +verbose=false +help=false + +for args in "$@" +do + case $args in + --prefix=*) + prefix="${args#*=}" + ;; + --parallel=*) + gotest_options="${gotest_options} -parallel ${args#*=}" + ;; + --verbose) + verbose=true + ;; + --help) + help=true + ;; + esac +done + +if [ "$help" = true ]; then + echo "Runs the openshift-ansible integration tests." + echo + echo "Options: " + echo " --prefix=PREFIX" + echo " The prefix to use for the image names." + echo " default: openshift-ansible-integration-" + echo + echo " --parallel=NUMBER" + echo " Number of tests to run in parallel." + echo " default: GOMAXPROCS (typically, number of processors)" + echo + echo " --verbose" + echo " Enables printing of the commands as they run." + echo + echo " --help" + echo " Prints this help message" + echo + exit 0 +fi + + + +if ! [ -d $source_root/../../.tox/integration ]; then + # have tox create a consistent virtualenv + pushd $source_root/../..; tox -e integration; popd +fi +# use the virtualenv from tox +set +o nounset; source $source_root/../../.tox/integration/bin/activate; set -o nounset + +if [ "$verbose" = true ]; then + set -x +fi + +# Run the tests. NOTE: "go test" requires a relative path for this purpose. +# The PWD trick below will only work if cwd is in/above where this script lives. +retval=0 +IMAGE_PREFIX="${prefix}" env -u GOPATH \ + go test ./${source_root#$PWD}/... ${gotest_options} + + diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py index 52e9a9888..393a4d6ba 100644 --- a/test/openshift_version_tests.py +++ b/test/openshift_version_tests.py @@ -44,7 +44,7 @@ class OpenShiftVersionTests(unittest.TestCase): {'name': 'oo_version_gte_3_5_or_1_5', 'positive_enterprise_version': '3.6.0', 'negative_enterprise_version': '3.4.0', - 'positive_origin_version': '1.6.0', + 'positive_origin_version': '3.6.0', 'negative_origin_version': '1.4.0'}] def test_legacy_gte_filters(self): diff --git a/test/modify_yaml_tests.py b/test/unit/modify_yaml_tests.py index 0dc25df82..65b2db44c 100644 --- a/test/modify_yaml_tests.py +++ b/test/unit/modify_yaml_tests.py @@ -5,7 +5,7 @@ import os import sys import unittest -sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../library/")] + sys.path +sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../../library/")] + sys.path # pylint: disable=import-error from modify_yaml import set_key # noqa: E402 @@ -3,6 +3,7 @@ minversion=2.3.1 envlist = py{27,35}-{flake8,pylint,unit} py27-{yamllint,ansible_syntax,generate_validation} + integration skipsdist=True skip_missing_interpreters=True @@ -12,6 +13,7 @@ deps = -rrequirements.txt -rtest-requirements.txt py35-flake8: flake8-bugbear==17.3.0 + integration: docker-py==1.10.6 commands = unit: pip install -e utils @@ -22,3 +24,9 @@ commands = generate_validation: python setup.py generate_validation # TODO(rhcarvalho): check syntax of other important entrypoint playbooks ansible_syntax: python setup.py ansible_syntax + # ansible 2.2.2+ unfortunately breaks the integration test runner + # because it can no longer set facts on the test docker hosts. + # So for now, install separate ansible version for integration. 
+ # PR that fixes it: https://github.com/ansible/ansible/pull/23599 + # Once that PR is available, drop this and use same ansible. + integration: pip install ansible==2.2.1.0 diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index a6d784dea..71dcf87aa 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -34,6 +34,12 @@ UPGRADE_MAPPINGS = { '3.5': { 'minor_version': '3.5', 'minor_playbook': 'v3_5/upgrade.yml', + 'major_playbook': 'v3_6/upgrade.yml', + 'major_version': '3.6', + }, + '3.6': { + 'minor_version': '3.6', + 'minor_playbook': 'v3_6/upgrade.yml', }, } diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py index f25266f29..1574d447a 100644 --- a/utils/src/ooinstall/variants.py +++ b/utils/src/ooinstall/variants.py @@ -39,18 +39,19 @@ class Variant(object): # WARNING: Keep the versions ordered, most recent first: OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [ - Version('3.5', 'openshift-enterprise'), + Version('3.6', 'openshift-enterprise'), ]) REG = Variant('openshift-enterprise', 'Registry', [ - Version('3.4', 'openshift-enterprise', 'registry'), + Version('3.6', 'openshift-enterprise', 'registry'), ]) origin = Variant('origin', 'OpenShift Origin', [ - Version('1.4', 'origin'), + Version('3.6', 'origin'), ]) LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [ + Version('3.5', 'openshift-enterprise'), Version('3.4', 'openshift-enterprise'), Version('3.3', 'openshift-enterprise'), Version('3.2', 'openshift-enterprise'), |
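A note on the GlusterFS template hunks above: both `glusterfs-registry-endpoints.yml.j2` and `topology.json.j2` now iterate over an explicit `glusterfs_nodes` list instead of the `oo_glusterfs_to_config` group, and they read per-host variables. As a rough sketch (the values are placeholders, not part of this change), the host vars a GlusterFS node is expected to carry look like the following; when `glusterfs_ip` is unset, the endpoints template falls back to `openshift.common.ip`:

```
# host_vars for a hypothetical GlusterFS node; values are placeholders
glusterfs_ip: 192.168.10.14   # address published in glusterfs-registry-endpoints
glusterfs_cluster: '1'        # cluster id used to group nodes in topology.json
```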
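The new block in `roles/openshift_version/tasks/main.yml` makes a containerized install fail when the RPM version found via repoquery differs from the container image version. A minimal sketch of inventory variables that keep the two aligned is below; the variable names appear in this repository, but the exact value formats (in particular the leading dash on `openshift_pkg_version`) are assumptions that should be checked against the inventory examples:

```
# sketch only: group vars for the OSEv3 group, not taken from this change
containerized: true
openshift_image_tag: v3.6.0-alpha.1   # version of the container images
openshift_pkg_version: "-3.6.0"       # RPM version; leading dash is an assumption
```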
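The preflight playbooks added under `test/integration/openshift_health_checker/preflight/playbooks/` all follow the same shape: start a test container with `setup_container.yml`, run one or more health checks inside a `block`, and always tear the container down afterwards. A new scenario would plausibly look like the sketch below (the file name and the combination of checks are hypothetical; the image, repo, and check names are taken from the existing playbooks). Each such playbook is then driven by a `PlaybookTest` entry in `preflight_test.go` that asserts on the exit code and on strings expected in the output.

```
---
# hypothetical playbooks/package_version_and_update.yml
- include: ../../setup_container.yml
  vars:
    image: preflight-aos-package-checks
    l_host_vars:
      openshift_deployment_type: openshift-enterprise
      openshift_release: 3.3

- name: Example scenario combining two checks
  hosts: all
  roles:
    - openshift_health_checker
  tasks:
    - block:

        - include: tasks/enable_repo.yml
          vars: { repo_name: "ose-3.3" }

        - action: openshift_health_check
          args:
            checks: ['package_version', 'package_update']

      always:  # destroy the container whether the checks passed or not
        - include: ../../teardown_container.yml
```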
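`setup_container.yml` documents several optional parameters in its header comment, but the preflight playbooks only ever set `image` and `l_host_vars`. For reference, an include that sets all of them could look like this sketch (every value is a placeholder):

```
- include: ../../setup_container.yml
  vars:
    scenario: custom_scenario     # fixed container name suffix instead of a random one
    image: test-target-base       # image name, prefixed with IMAGE_PREFIX at run time
    command: sleep 3600           # overrides the default 'sleep 1800'
    l_groups: masters,nodes       # inventory groups the container host is added to
    l_host_vars:
      openshift_deployment_type: origin
```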