-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  ansible.cfg  4
-rw-r--r--  docs/proposals/crt_management_proposal.md  113
-rw-r--r--  openshift-ansible.spec  34
-rw-r--r--  playbooks/adhoc/uninstall.yml  73
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml  22
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml  28
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml  7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/README.md  3
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/README.md  18
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml  7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml  16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml  9
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/README.md  18
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml  7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml  16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml  9
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/README.md  18
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml  7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml  16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml  9
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/README.md  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  36
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml  66
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml  20
l---------  playbooks/common/openshift-cluster/upgrades/v3_3/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml  118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml  118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml  113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml  10
l---------  playbooks/common/openshift-cluster/upgrades/v3_4/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml  116
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml  118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml  111
l---------  playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml  10
l---------  playbooks/common/openshift-cluster/upgrades/v3_5/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml  120
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml  122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml  111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml  67
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml  8
-rw-r--r--  roles/docker/defaults/main.yml  2
-rw-r--r--  roles/docker/meta/main.yml  1
-rw-r--r--  roles/docker/tasks/package_docker.yml  5
-rw-r--r--  roles/docker/tasks/registry_auth.yml  16
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml  6
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml  8
-rw-r--r--  roles/lib_openshift/library/oc_edit.py  15
-rw-r--r--  roles/lib_openshift/src/ansible/oc_edit.py  15
-rw-r--r--  roles/lib_utils/library/docker_creds.py  207
-rw-r--r--  roles/openshift_aws/defaults/main.yml  14
-rw-r--r--  roles/openshift_aws/files/describeinstances.json  15
-rw-r--r--  roles/openshift_aws/files/trustpolicy.json  12
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml  3
-rw-r--r--  roles/openshift_aws/tasks/iam_role.yml  36
-rw-r--r--  roles/openshift_aws/tasks/launch_config_create.yml  4
-rw-r--r--  roles/openshift_master/defaults/main.yml  1
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml  20
-rw-r--r--  roles/openshift_node/defaults/main.yml  3
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml  1
-rw-r--r--  roles/openshift_node/tasks/config.yml  1
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml  19
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml  1
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml  19
-rw-r--r--  utils/etc/ansible.cfg  4
-rw-r--r--  utils/src/ooinstall/cli_installer.py  18
67 files changed, 691 insertions, 1461 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 6fab7527e..88c353122 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.5-1 ./
+3.8.0-0.1.0 ./
diff --git a/ansible.cfg b/ansible.cfg
index 589a58e9d..5a4c1cd80 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -33,6 +33,10 @@ callback_whitelist = profile_tasks
# Uncomment to use the provided AWS dynamic inventory script
#hostfile = inventory/aws/ec2.py
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
# Additional ssh options for OpenShift Ansible
[ssh_connection]
pipelining = True
diff --git a/docs/proposals/crt_management_proposal.md b/docs/proposals/crt_management_proposal.md
new file mode 100644
index 000000000..5fc1ad08d
--- /dev/null
+++ b/docs/proposals/crt_management_proposal.md
@@ -0,0 +1,113 @@
+# Container Runtime Management
+
+## Description
+origin and openshift-ansible support multiple container runtimes. This proposal
+covers refactoring how we handle those runtimes in openshift-ansible.
+
+### Problems addressed
+We currently don't install docker early enough in the install process to avoid
+failing health checks, and we don't have a good story around when/how to do it.
+This is complicated by logic around containerized and non-containerized installs.
+
+A web of dependencies can cause unintended changes to docker and has resulted
+in a series of work-arounds such as the 'skip_docker' boolean.
+
+We don't handle docker storage because it's BYO. By moving docker to a prerequisite
+play, we can tackle storage up front and never have to touch it again.
+
+container_runtime logic is currently spread across 3 roles: docker, openshift_docker,
+and openshift_docker_facts. The name 'docker' does not accurately portray what
+the role(s) do.
+
+## Rationale
+* Refactor docker (and related meta/fact roles) into 'container_runtime' role.
+* Strip all meta-depends on container runtime out of other roles and plays.
+* Create a 'prerequisites.yml' entry point that will set up various items,
+such as container storage and the container runtime, before executing installation.
+* All other roles and plays should merely consume the container runtime; as far
+as feasible, they should not configure, restart, or otherwise change it.
+
+## Design
+
+The container_runtime role should be composed of 3 'pseudo-roles' which will be
+consumed using include_role; each component area should be enabled/disabled with
+a boolean value, defaulting to true.
+
+I call them 'pseudo-roles' because they are more or less independent functional
+areas that may share some variables and act on closely related components. This
+is an effort to reuse as much code as possible, limit role-bloat (we already have
+an abundance of roles), and make things as modular as possible.
+
+```yaml
+# prerequisites.yml
+- include: std_include.yml
+- include: container_runtime_setup.yml
+...
+# container_runtime_setup.yml
+- hosts: "{{ openshift_runtime_manage_hosts | default('oo_nodes_to_config') }}"
+ tasks:
+ - include_role:
+ name: container_runtime
+ tasks_from: install.yml
+ when: openshift_container_runtime_install | default(True) | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: storage.yml
+ when: openshift_container_runtime_storage | default(True) | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: configure.yml
+ when: openshift_container_runtime_configure | default(True) | bool
+```
+
+Note the host group on the above play. No more guessing which hosts to run this
+against. If you want an atomic install, specify which hosts need us to set up the
+container runtime (such as etcd hosts, load balancers, etc.).
+
+In the docs, we should direct users on atomic hosts to disable the install step;
+let's not add a bunch of logic.
+
+Alternatively, we can create a new group.
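+
+For example, to also prepare etcd and load balancer hosts, one could run
+prerequisites.yml with something like
+`-e openshift_runtime_manage_hosts='oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config'`
+(illustrative usage; the variable comes from the play sketched above).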
+
+### Part 1, container runtime install
+Install the container runtime components of the desired type.
+
+```yaml
+# install.yml
+- include: docker.yml
+ when: openshift_container_runtime_install_docker | bool
+
+- include: crio.yml
+ when: openshift_container_runtime_install_crio | bool
+
+... other container runtimes ...
+```
+
+As an alternative to using booleans for each runtime, we could use a variable like
+"openshift_container_runtime_type". This would be my preference, as we could
+use this information in later roles.
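+
+A minimal sketch of install.yml under that assumption (the variable name and
+default shown here are illustrative, not settled):
+
+```yaml
+# install.yml -- sketch assuming a single openshift_container_runtime_type
+- include: docker.yml
+  when: openshift_container_runtime_type | default('docker') == 'docker'
+
+- include: crio.yml
+  when: openshift_container_runtime_type | default('docker') == 'crio'
+```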
+
+### Part 2, configure/setup container runtime storage
+Configure a supported storage solution for containers.
+
+The setup is similar to the previous section. We might need to add some logic for
+the different runtimes here, or maybe create a matrix of possible options, as
+sketched below.
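+
+If we adopt the type variable, the dispatch here could even collapse into a
+dynamic include, with one storage task file per supported runtime (file names
+are illustrative):
+
+```yaml
+# storage.yml -- sketch; expects docker_storage.yml, crio_storage.yml, etc.
+- include: "{{ openshift_container_runtime_type | default('docker') }}_storage.yml"
+```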
+
+### Part 3, configure container runtime
+Place config files, environment files, systemd units, etc. Start/restart
+the container runtime as needed.
+
+Similar to Part 1 in how we should do things; see the sketch below.
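+
+A minimal sketch of the configure component; the template name and the
+'restart container runtime' handler are placeholders, not existing code:
+
+```yaml
+# configure.yml -- sketch
+- name: Place container runtime configuration
+  template:
+    src: "{{ openshift_container_runtime_type | default('docker') }}.sysconfig.j2"
+    dest: "/etc/sysconfig/{{ openshift_container_runtime_type | default('docker') }}"
+  # handler restarts the runtime only when the config actually changed
+  notify: restart container runtime
+```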
+
+## Checklist
+* Strip docker from meta dependencies.
+* Combine docker facts and meta roles into container_runtime role.
+* Docs
+
+## User Story
+As a user of openshift-ansible, I want to be able to manage my container runtime
+and related components independently of openshift itself.
+
+## Acceptance Criteria
+* Verify that each container runtime installs with this new method.
+* Verify that openshift installs with this new method.
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 8bd9cd0f3..76a56e5cf 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.8.0
-Release: 0.0.0%{?dist}
+Release: 0.1.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -285,6 +285,38 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Nov 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.1.0
+- Allow disabling authorization migration check (sdodson@redhat.com)
+- Alternative method to create docker registry auth creds (mgugino@redhat.com)
+- Nuke /var/lib/dockershim/sandbox/* while nodes are drained
+ (sdodson@redhat.com)
+- crio: sync crio.conf (gscrivan@redhat.com)
+- Updating provisioning order. (kwoodson@redhat.com)
+- Regex anchors changed to match protocol start and ports.
+ (kwoodson@redhat.com)
+- First pass at v3.8 support (sdodson@redhat.com)
+- Run registry auth after docker restart (mgugino@redhat.com)
+- Fix extension script for catalog (mgugino@redhat.com)
+- Adding instance profile support for node groups. (kwoodson@redhat.com)
+- Bumping openshift-ansible to 3.8 (smunilla@redhat.com)
+- ansible.cfg: error when inventory does not parse (lmeyer@redhat.com)
+- removing kind restrictions from oc_edit (kwoodson@redhat.com)
+- Update Docs. Make Clearer where the actual docs are. (tbielawa@redhat.com)
+- Remove upgrade playbooks for 3.3 through 3.5 (rteague@redhat.com)
+- GlusterFS: Add gluster-s3 functionality (jarrpa@redhat.com)
+- GlusterFS: Add glusterblock functionality (jarrpa@redhat.com)
+- GlusterFS: Update heketi templates for latest version (jarrpa@redhat.com)
+- GlusterFS: Specify resource requests (jarrpa@redhat.com)
+- Remove remaining haproxy files with uninstallation
+ (nakayamakenjiro@gmail.com)
+- Proposal: container_runtime role (mgugino@redhat.com)
+- Fix contenerized documentation? (mickael.canevet@camptocamp.com)
+- Cleans up additional artifacts in uninstall. Closes 3082
+ (gregswift@gmail.com)
+- Add execution times to checkpoint status (rteague@redhat.com)
+- Make clearer *_nfs_directory and *_volume_name (lpsantil@gmail.com)
+- Allow cluster IP for docker-registry service to be set (hansmi@vshn.ch)
+
* Thu Nov 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.5-1
-
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 07f10d48c..5ed55a817 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -151,6 +151,14 @@
- lbr0
- vlinuxbr
- vovsbr
+
+ - name: Remove virtual devices
+    command: nmcli device delete "{{ item }}"
+ failed_when: False
+ with_items:
+ - tun0
+ - docker0
+
when: openshift_remove_all | default(true) | bool
- shell: atomic uninstall "{{ item }}"-master-api
@@ -264,12 +272,30 @@
- "{{ directories.results | default([]) }}"
- files
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+  - name: stop and disable container-engine
+ service: name=container-engine state=stopped enabled=no
+ failed_when: false
+ register: container_engine
+
+  - name: stop and disable docker
+ service: name=docker state=stopped enabled=no
+ failed_when: false
+ when: not (container_engine | changed)
+ register: l_docker_restart_docker_in_pb_result
+ until: not l_docker_restart_docker_in_pb_result | failed
+ retries: 3
+ delay: 30
+
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
- /etc/ansible/facts.d/openshift.fact
- /etc/openshift
- /etc/openshift-sdn
+ - /etc/pki/ca-trust/source/anchors/openshift-ca.crt
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/atomic-openshift-node-dep
- /etc/sysconfig/openshift-node-dep
@@ -284,23 +310,38 @@
- /etc/systemd/system/origin-node-dep.service
- /etc/systemd/system/origin-node.service
- /etc/systemd/system/origin-node.service.wants
+ - /var/lib/docker
+
+ - name: Rebuild ca-trust
+ command: update-ca-trust
+
+ - name: Reset Docker proxy configuration
+ lineinfile:
+ state=absent
+ dest=/etc/sysconfig/docker
+ regexp='(NO_PROXY|HTTP_PROXY|HTTPS_PROXY)=.*'
+
+ - name: Reset Docker registry configuration
+ lineinfile:
+ state=absent
+ dest=/etc/sysconfig/docker
+ regexp='(ADD_REGISTRY|BLOCK_REGISTRY|INSECURE_REGISTRY)=.*'
+
+ - name: Detect Docker storage configuration
+ shell: vgs -o name | grep docker
+ register: docker_vg_name
+ failed_when: false
+ changed_when: false
- - shell: systemctl daemon-reload
- changed_when: False
+ - name: Wipe out Docker storage contents
+ command: vgremove -f {{ item }}
+ with_items: "{{ docker_vg_name.stdout_lines }}"
+ when: docker_vg_name.rc == 0
- - name: restart container-engine
- service: name=container-engine state=restarted
- failed_when: false
- register: container_engine
+ - name: Wipe out Docker storage configuration
+ file: path=/etc/sysconfig/docker-storage state=absent
+ when: docker_vg_name.rc == 0
- - name: restart docker
- service: name=docker state=restarted
- failed_when: false
- when: not (container_engine | changed)
- register: l_docker_restart_docker_in_pb_result
- until: not l_docker_restart_docker_in_pb_result | failed
- retries: 3
- delay: 30
- hosts: masters
become: yes
@@ -525,3 +566,7 @@
with_items:
- /etc/ansible/facts.d/openshift.fact
- /var/lib/haproxy/stats
+  # Here we remove only limits.conf rather than the whole directory, as users may put their own files there.
+ # - /etc/systemd/system/haproxy.service.d
+ - /etc/systemd/system/haproxy.service.d/limits.conf
+ - /etc/systemd/system/haproxy.service
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
new file mode 100644
index 000000000..db6e3b8e1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -0,0 +1,22 @@
+---
+- include: ../../common/openshift-cluster/openshift_hosted.yml
+
+- include: ../../common/openshift-cluster/openshift_metrics.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- include: ../../common/openshift-cluster/openshift_logging.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- include: ../../common/openshift-cluster/service_catalog.yml
+ when: openshift_enable_service_catalog | default(false) | bool
+
+- include: ../../common/openshift-management/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index 4d0bf9531..1e8118490 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -21,5 +21,29 @@
- name: run the std_include
include: ../../common/openshift-cluster/std_include.yml
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+- name: perform the installer openshift-checks
+ include: ../../common/openshift-checks/install.yml
+
+- name: etcd install
+ include: ../../common/openshift-etcd/config.yml
+
+- name: include nfs
+ include: ../../common/openshift-nfs/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- name: include loadbalancer
+ include: ../../common/openshift-loadbalancer/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- name: include openshift-master config
+ include: ../../common/openshift-master/config.yml
+
+- name: include master additional config
+ include: ../../common/openshift-master/additional_config.yml
+
+- name: include openshift-node config
+ include: ../../common/openshift-node/config.yml
+
+- name: include openshift-glusterfs
+ include: ../../common/openshift-glusterfs/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index e787deced..78dd6a49b 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -6,11 +6,14 @@
- name: Include the provision.yml playbook to create cluster
include: provision.yml
-- name: Include the install.yml playbook to install cluster
+- name: Include the install.yml playbook to install cluster on masters
include: install.yml
-- name: Include the install.yml playbook to install cluster
+- name: Include the provision_nodes.yml playbook to provision infra/compute node resources
include: provision_nodes.yml
- name: Include the accept.yml playbook to accept nodes into the cluster
include: accept.yml
+
+- name: Include the hosted.yml playbook to finish the hosted configuration
+ include: hosted.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index 0f64f40f3..d9b1fc2ca 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,6 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
+- [OpenShift Container Platform 3.6 to 3.7](v3_7/README.md) (works also to upgrade OpenShift Origin from 3.6.x to 3.7.x)
- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x)
-- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x)
-- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x)
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md
deleted file mode 100644
index 6892f6324..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# v3.3 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Unschedule node.
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
deleted file mode 100644
index 697a18c4d..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
deleted file mode 100644
index 4d284c279..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
deleted file mode 100644
index 180a2821f..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
deleted file mode 100644
index 85b807dc6..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# v3.4 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Unschedule node.
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
deleted file mode 100644
index 8cce91b3f..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
deleted file mode 100644
index 8e5d0f5f9..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
deleted file mode 100644
index d5329b858..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md
deleted file mode 100644
index 53eebe65e..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# v3.5 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Unschedule node.
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
deleted file mode 100644
index f44d55ad2..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
deleted file mode 100644
index 2377713fa..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
deleted file mode 100644
index 5b3f6ab06..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
index 4bf53be81..914e0f5b2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
@@ -1,4 +1,4 @@
-# v3.6 Major and Minor Upgrade Playbook
+# v3.7 Major and Minor Upgrade Playbook
## Overview
This playbook currently performs the following steps.
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c5a90d8c2..ed97d9993 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,22 +3,6 @@
# Upgrade Masters
###############################################################################
-# oc adm migrate storage should be run prior to etcd v3 upgrade
-# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
-- name: Pre master upgrade - Upgrade all storage
- hosts: oo_first_master
- tasks:
- - name: Upgrade all storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=* --confirm
- register: l_pb_upgrade_control_plane_pre_upgrade_storage
- when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- failed_when:
- - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
-
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
# so we must first make sure this is set correctly before attempting the backup.
@@ -48,6 +32,22 @@
- include: create_service_signer_cert.yml
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade all storage
+ hosts: oo_first_master
+ tasks:
+ - name: Upgrade all storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ register: l_pb_upgrade_control_plane_pre_upgrade_storage
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+ - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
+
# Set openshift_master_facts separately. In order to reconcile
# admission_config's, we currently must run openshift_master_facts and
# then run openshift_facts.
@@ -153,7 +153,9 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
register: l_pb_upgrade_control_plane_post_upgrade_storage
- when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - openshift_version | version_compare('3.7','<')
failed_when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
deleted file mode 100644
index 5e7a66171..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst'
- yaml_value: 400
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps'
- yaml_value: 200
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst'
- yaml_value: 600
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
- yaml_value: 300
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
deleted file mode 100644
index 89b524f14..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.burst'
- yaml_value: 40
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.qps'
- yaml_value: 20
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
deleted file mode 100644
index a241ef039..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
deleted file mode 100644
index 54c85f0fb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
deleted file mode 100644
index cee4e9087..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
deleted file mode 100644
index 52458e03c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
deleted file mode 100644
index ae217ba2e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ /dev/null
@@ -1,116 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
deleted file mode 100644
index d7cb38d03..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
deleted file mode 100644
index 8531e6045..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
deleted file mode 100644
index 52458e03c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_5/roles
deleted file mode 120000
index 415645be6..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
deleted file mode 100644
index bda245fe1..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ /dev/null
@@ -1,120 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_5/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
deleted file mode 100644
index 6cdea7b84..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ /dev/null
@@ -1,122 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_5/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
deleted file mode 100644
index e29d0f8e6..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
deleted file mode 100644
index ae63c9ca9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-###############################################################################
-# Pre upgrade checks for known data problems, if this playbook fails you should
-# contact support. If you're not supported contact users@lists.openshift.com
-#
-# oc_objectvalidator provides these two checks
-# 1 - SDN Data issues, never seen in the wild but known possible due to code audits
-# https://github.com/openshift/origin/issues/12697
-# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
-#
-###############################################################################
-- name: Verify 3.5 specific upgrade checks
- hosts: oo_first_master
- roles:
- - { role: lib_openshift }
- tasks:
- - name: Check for invalid namespaces and SDN errors
- oc_objectvalidator:
-
- # What's all this PetSet business about?
- #
- # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are
- # no longer supported. The BETA resource 'StatefulSets' replaces
- # them. We can't migrate clients PetSets to
- # StatefulSets. Additionally, Red Hat has never officially supported
- # these resource types. Sorry users, but if you were using
- # unsupported resources from the Kube documentation then we can't
- # help you at this time.
- #
- # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229
- - name: Check if legacy PetSets exist
- oc_obj:
- state: list
- all_namespaces: true
- kind: petsets
- register: l_do_petsets_exist
-
- - name: Fail on unsupported resource migration 'PetSets'
- fail:
- msg: >
- PetSet objects were detected in your cluster. These are an
- Alpha feature in upstream Kubernetes 1.4 and are not supported
- by Red Hat. In Kubernetes 1.5, they are replaced by the Beta
- feature StatefulSets. Red Hat currently does not offer support
- for either PetSets or StatefulSets.
-
- Automatically migrating PetSets to StatefulSets in OpenShift
- Container Platform (OCP) 3.5 is not supported. See the
- Kubernetes "Upgrading from PetSets to StatefulSets"
- documentation for additional information:
-
- https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/
-
- PetSets MUST be removed before upgrading to OCP 3.5. Red Hat
- strongly recommends reading the above referenced documentation
- in its entirety before taking any destructive actions.
-
- If you want to simply remove all PetSets without manually
- migrating to StatefulSets, run this command as a user with
- cluster-admin privileges:
-
- $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
- when:
- # Search did not fail, valid resource type found
- - l_do_petsets_exist.results.returncode == 0
- # Items do exist in the search results
- - l_do_petsets_exist.results.results.0['items'] | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 7a28eeb27..74d0cd8ad 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -11,13 +11,15 @@
tasks:
- name: Check for invalid namespaces and SDN errors
oc_objectvalidator:
-
+ # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
- name: Confirm OpenShift authorization objects are in sync
command: >
{{ openshift.common.client_binary }} adm migrate authorization
- when: openshift_currently_installed_version | version_compare('3.7','<')
+ when:
+ - openshift_currently_installed_version | version_compare('3.7','<')
+ - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
changed_when: false
register: l_oc_result
until: l_oc_result.rc == 0
- retries: 4
+ retries: 2
delay: 15
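
For readers unfamiliar with Ansible's until/retries/delay trio, the task above amounts to a bounded retry loop around the migrate command. A rough Python sketch of that shape follows; the 'oc' binary path is an assumption and Ansible's exact attempt accounting differs slightly, so this is the spirit, not the letter:

import subprocess
import time

def migrate_authorization(client_binary='oc', retries=2, delay=15):
    # Re-run the migration until it exits 0, sleeping `delay` seconds
    # between attempts and giving up once the retries are exhausted.
    for _ in range(retries):
        rc = subprocess.call([client_binary, 'adm', 'migrate', 'authorization'])
        if rc == 0:
            return True
        time.sleep(delay)
    return False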
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index c086c28df..224844a06 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -2,6 +2,8 @@
docker_cli_auth_config_path: '/root/.docker'
openshift_docker_signature_verification: False
+openshift_docker_alternative_creds: False
+
# oreg_url is defined by user input.
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_replace: False
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index 62b8a2eb5..d5faae8df 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -12,3 +12,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index c1aedf879..8121163a6 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -154,6 +154,7 @@
- set_fact:
docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-- include: registry_auth.yml
-
- meta: flush_handlers
+
+# This needs to run after docker is restarted to account for proxy settings.
+- include: registry_auth.yml
diff --git a/roles/docker/tasks/registry_auth.yml b/roles/docker/tasks/registry_auth.yml
index d05b7f2b8..2c7bc5711 100644
--- a/roles/docker/tasks/registry_auth.yml
+++ b/roles/docker/tasks/registry_auth.yml
@@ -12,5 +12,21 @@
delay: 5
until: openshift_docker_credentials_create_res.rc == 0
when:
+ - not openshift_docker_alternative_creds | bool
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host; this is an
+# alternative implementation for non-docker hosts. This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for docker cli registry auth (alternative)
+ docker_creds:
+ path: "{{ docker_cli_auth_config_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
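
For illustration, a minimal Python sketch of the file this alternative path produces — the same ~/.docker/config.json layout that 'docker login' would write, with base64("username:password") stored under the registry's auths entry. The path, registry, and credential values below are made up; in the role they come from docker_cli_auth_config_path, oreg_host, oreg_auth_user, and oreg_auth_password:

import base64
import json
import os

path = '/root/.docker'                  # docker_cli_auth_config_path
registry = 'registry.example.com:443'   # oreg_host
auth = base64.b64encode(b'myuser:mypassword').decode()

config = {'auths': {registry: {'auth': auth}}}

# Create the config directory with restrictive permissions, then dump the JSON.
if not os.path.isdir(path):
    os.makedirs(path, 0o700)
with open(os.path.join(path, 'config.json'), 'w') as f:
    json.dump(config, f, indent=8)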
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 1e2d64293..3fe10454d 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -179,3 +179,9 @@
register: start_result
- meta: flush_handlers
+
+# If we are using crio only, docker.service might not be available for
+# 'docker login'
+- include: registry_auth.yml
+ vars:
+ openshift_docker_alternative_creds: "{{ l_use_crio_only }}"
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index aa3b35ddd..84220fa66 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -173,6 +173,10 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
-- include: registry_auth.yml
-
- meta: flush_handlers
+
+# Since docker is running as a system container, 'docker login' will fail to create
+# credentials. Use the alternative method when authenticated registries are required.
+- include: registry_auth.yml
+ vars:
+ openshift_docker_alternative_creds: True
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 0b6a8436b..0cea07256 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1556,20 +1556,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
- kind=dict(required=True,
- type='str',
- choices=['dc', 'deploymentconfig',
- 'rc', 'replicationcontroller',
- 'svc', 'service',
- 'scc', 'securitycontextconstraints',
- 'ns', 'namespace', 'project', 'projects',
- 'is', 'imagestream',
- 'istag', 'imagestreamtag',
- 'bc', 'buildconfig',
- 'routes',
- 'node',
- 'secret',
- 'pv', 'persistentvolume']),
+ kind=dict(required=True, type='str'),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, required=True, type='dict'),
diff --git a/roles/lib_openshift/src/ansible/oc_edit.py b/roles/lib_openshift/src/ansible/oc_edit.py
index 5c5954747..221047393 100644
--- a/roles/lib_openshift/src/ansible/oc_edit.py
+++ b/roles/lib_openshift/src/ansible/oc_edit.py
@@ -15,20 +15,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
- kind=dict(required=True,
- type='str',
- choices=['dc', 'deploymentconfig',
- 'rc', 'replicationcontroller',
- 'svc', 'service',
- 'scc', 'securitycontextconstraints',
- 'ns', 'namespace', 'project', 'projects',
- 'is', 'imagestream',
- 'istag', 'imagestreamtag',
- 'bc', 'buildconfig',
- 'routes',
- 'node',
- 'secret',
- 'pv', 'persistentvolume']),
+ kind=dict(required=True, type='str'),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, required=True, type='dict'),
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
new file mode 100644
index 000000000..d4674845e
--- /dev/null
+++ b/roles/lib_utils/library/docker_creds.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+#
+# Copyright 2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: docker_creds
+
+short_description: Creates/updates a docker config.json credentials file in place of using 'docker login'
+
+version_added: "2.4"
+
+description:
+ - This module creates a docker config.json file in the directory provided by 'path'
+    on hosts that do not support 'docker login' but still need the file present
+    so that other services can authenticate to the registry.
+
+options:
+ path:
+ description:
+    - This is the directory in which the config.json file will be created or updated.
+ required: true
+ registry:
+ description:
+ - This is the registry the credentials are for.
+ required: true
+ username:
+ description:
+ - This is the username to authenticate to the registry with.
+ required: true
+ password:
+ description:
+ - This is the password to authenticate to the registry with.
+ required: true
+
+author:
+ - "Michael Gugino <mgugino@redhat.com>"
+'''
+
+EXAMPLES = '''
+# Write credentials for registry.example.com into /root/.docker/config.json
+- name: Place credentials in file
+ docker_creds:
+ path: /root/.docker
+ registry: registry.example.com:443
+ username: myuser
+ password: mypassword
+'''
+
+
+def check_dest_dir_exists(module, dest):
+ '''Check if dest dir is present and is a directory'''
+ dir_exists = os.path.exists(dest)
+ if dir_exists:
+ if not os.path.isdir(dest):
+ msg = "{} exists but is not a directory".format(dest)
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': msg,
+ 'state': 'unknown'}
+ module.fail_json(**result)
+ else:
+ return 1
+ else:
+ return 0
+
+
+def create_dest_dir(module, dest):
+ try:
+ os.makedirs(dest, mode=0o700)
+ except OSError as oserror:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': str(oserror),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+
+
+def load_config_file(module, dest):
+ '''load the config.json in directory dest'''
+ conf_file_path = os.path.join(dest, 'config.json')
+ if os.path.exists(conf_file_path):
+ # Try to open the file and load json data
+ try:
+ with open(conf_file_path) as conf_file:
+ data = conf_file.read()
+ jdata = json.loads(data)
+
+ except IOError as ioerror:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': str(ioerror),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+ except ValueError as jsonerror:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': str(jsonerror),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+ return jdata
+ else:
+ # File doesn't exist, we just return an empty dictionary.
+ return {}
+
+
+def update_config(docker_config, registry, username, password):
+ '''Add our registry auth credentials into docker_config dict'''
+
+ # Add anything that might be missing in our dictionary
+ if 'auths' not in docker_config:
+ docker_config['auths'] = {}
+ if registry not in docker_config['auths']:
+ docker_config['auths'][registry] = {}
+
+    # base64-encode "username:password" (encode to bytes first, then decode back to str, so this also works on Python 3)
+    encoded_data = base64.b64encode('{}:{}'.format(username, password).encode()).decode()
+
+ # check if the same value is already present for idempotency.
+ if 'auth' in docker_config['auths'][registry]:
+ if docker_config['auths'][registry]['auth'] == encoded_data:
+ # No need to go further, everything is already set in file.
+ return False
+ docker_config['auths'][registry]['auth'] = encoded_data
+ return True
+
+
+def write_config(module, docker_config, dest):
+ '''Write updated credentials into dest/config.json'''
+ conf_file_path = os.path.join(dest, 'config.json')
+ try:
+ with open(conf_file_path, 'w') as conf_file:
+ json.dump(docker_config, conf_file, indent=8)
+ except IOError as ioerror:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': str(ioerror),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+
+
+def run_module():
+ '''Run this module'''
+ module_args = dict(
+ path=dict(aliases=['dest', 'name'], required=True, type='path'),
+ registry=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True)
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False
+ )
+
+ # First, create our dest dir if necessary
+ dest = module.params['path']
+ registry = module.params['registry']
+ username = module.params['username']
+ password = module.params['password']
+
+ if not check_dest_dir_exists(module, dest):
+ create_dest_dir(module, dest)
+ docker_config = {}
+ else:
+ # We want to scrape the contents of dest/config.json
+ # in case there are other registries/settings already present.
+ docker_config = load_config_file(module, dest)
+
+ # Put the registry auth info into the config dict.
+ changed = update_config(docker_config, registry, username, password)
+
+ if changed:
+ write_config(module, docker_config, dest)
+
+ result = {'changed': changed}
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
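
A quick way to see the idempotency logic above in action: calling update_config() twice with the same credentials reports a change only once, so write_config() only runs when the auth entry actually differs. This assumes the module file above is on PYTHONPATH so the function can be imported directly:

from docker_creds import update_config  # the module defined above

config = {}
print(update_config(config, 'registry.example.com:443', 'myuser', 'mypassword'))  # True: entry added
print(update_config(config, 'registry.example.com:443', 'myuser', 'mypassword'))  # False: already up to date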
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 51f7d31c2..c9a429675 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -1,6 +1,7 @@
---
openshift_aws_create_s3: True
openshift_aws_create_iam_cert: True
+openshift_aws_create_iam_role: False
openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
@@ -17,6 +18,10 @@ openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''
openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift"
+openshift_aws_iam_role_name: openshift_node_describe_instances
+openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
+openshift_aws_iam_role_policy_name: "describe_instances"
+
openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
openshift_aws_ami: ''
openshift_aws_ami_copy_wait: False
@@ -135,6 +140,9 @@ openshift_aws_master_group_config:
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}"
openshift_aws_node_group_config:
@@ -155,6 +163,9 @@ openshift_aws_node_group_config:
type: compute
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
# The 'infra' key is always required here.
infra:
instance_type: m4.xlarge
@@ -172,6 +183,9 @@ openshift_aws_node_group_config:
type: infra
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}"
openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
diff --git a/roles/openshift_aws/files/describeinstances.json b/roles/openshift_aws/files/describeinstances.json
new file mode 100644
index 000000000..40de49721
--- /dev/null
+++ b/roles/openshift_aws/files/describeinstances.json
@@ -0,0 +1,15 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": [
+ "ec2:DescribeInstances"
+ ],
+ "Resource": [
+ "*"
+ ],
+ "Effect": "Allow",
+ "Sid": "Stmt1438195894000"
+ }
+ ]
+}
diff --git a/roles/openshift_aws/files/trustpolicy.json b/roles/openshift_aws/files/trustpolicy.json
new file mode 100644
index 000000000..87c7d7c42
--- /dev/null
+++ b/roles/openshift_aws/files/trustpolicy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 852adc7b5..7e8e9b679 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -27,6 +27,9 @@
- set_fact:
l_epoch_time: "{{ ansible_date_time.epoch }}"
+- when: openshift_aws_create_iam_role
+ include: iam_role.yml
+
- when: openshift_aws_create_launch_config
include: launch_config.yml
diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml
new file mode 100644
index 000000000..d9910d938
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_role.yml
@@ -0,0 +1,36 @@
+---
+#####
+# Instance profiles consist of two parts. The first part is creating a role
+# that the instance can assume; the instance uses the role's permissions
+# to make API calls on its behalf. This role requires a trust policy
+# that links a service (ec2) to the role, stating that the role
+# is allowed to make EC2 API calls.
+# See ../files/trustpolicy.json
+#
+# Currently openshift-node requires
+# access to the AWS API to call describeinstances.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1510519
+#####
+- name: Create an iam role
+ iam_role:
+ name: "{{ item.value.iam_role }}"
+ assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
+ state: "{{ openshift_aws_iam_role_state | default('present') }}"
+ when: item.value.iam_role is defined
+ with_dict: "{{ l_nodes_to_build }}"
+
+#####
+# The second part of this task file is linking the role to a policy
+# that specifies which calls the role can make to the ec2 API.
+# Currently all that is required is DescribeInstances.
+# See ../files/describeinstances.json
+#####
+- name: Create an iam policy
+ iam_policy:
+ iam_type: role
+ iam_name: "{{ item.value.iam_role }}"
+ policy_json: "{{ item.value.policy_json }}"
+ policy_name: "{{ item.value.policy_name }}"
+ state: "{{ openshift_aws_iam_role_state | default('present') }}"
+ when: item.value.iam_role is defined
+ with_dict: "{{ l_nodes_to_build }}"
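
The two-step shape of these tasks maps directly onto the AWS IAM API. A rough boto3 equivalent is sketched below under some assumptions: the role and policy names are taken from the role defaults above, the JSON files are read from a local files/ directory, and error handling for already-existing resources is omitted. Note the Ansible iam_role module also ensures a matching instance profile exists; with raw boto3 that would be separate create_instance_profile / add_role_to_instance_profile calls.

import json

import boto3

iam = boto3.client('iam')

with open('files/trustpolicy.json') as f:
    trust_policy = f.read()
with open('files/describeinstances.json') as f:
    role_policy = f.read()

# Step 1: create the role with a trust policy that lets EC2 assume it.
iam.create_role(
    RoleName='openshift_node_describe_instances',
    AssumeRolePolicyDocument=trust_policy,
)

# Step 2: attach the inline policy granting ec2:DescribeInstances.
iam.put_role_policy(
    RoleName='openshift_node_describe_instances',
    PolicyName='describe_instances',
    PolicyDocument=role_policy,
)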
diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml
index 8265c2179..a688496d2 100644
--- a/roles/openshift_aws/tasks/launch_config_create.yml
+++ b/roles/openshift_aws/tasks/launch_config_create.yml
@@ -15,6 +15,10 @@
image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}"
instance_type: "{{ launch_config_item.value.instance_type }}"
security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
+ instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and
+ launch_config_item.value.iam_role != '' and
+ openshift_aws_create_iam_role
+ else omit }}"
user_data: "{{ lookup('template', 'user_data.j2') }}"
key_name: "{{ openshift_aws_ssh_key_name }}"
ebs_optimized: False
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index a27fbae7e..97a8735ee 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -31,6 +31,7 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
containerized_svc_dir: "/usr/lib/systemd/system"
ha_svc_template_path: "native-cluster"
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index cde01c49e..c95f562d0 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -8,6 +8,7 @@
- name: Create credentials for registry auth
command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
+ - not (openshift_docker_alternative_creds | default(False))
- oreg_auth_user is defined
- (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: master_oreg_auth_credentials_create
@@ -18,6 +19,25 @@
- restart master api
- restart master controllers
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host; this is an
+# alternative implementation for non-docker hosts. This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for registry auth (alternative)
+ docker_creds:
+ path: "{{ oreg_auth_credentials_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | default(False) | bool
+ - oreg_auth_user is defined
+  - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: master_oreg_auth_credentials_create
+ notify:
+ - restart master api
+ - restart master controllers
+
# Container images may need the registry credentials
- name: Setup ro mount of /root/.docker for containerized hosts
set_fact:
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0c6d8db38..89d154ad7 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -85,6 +85,7 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
# NOTE
# r_openshift_node_*_default may be defined external to this role.
@@ -115,3 +116,5 @@ openshift_node_config_dir: "{{ openshift_node_config_dir_default }}"
openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
+
+openshift_node_use_instance_profiles: False
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index 8cf41ab4c..b8be50f6c 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -34,6 +34,7 @@
- name: include aws sysconfig credentials
include: aws.yml
static: yes
+ when: not (openshift_node_use_instance_profiles | default(False))
#- name: update the ExecStart to have bootstrap
# lineinfile:
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index c08f43118..2fea33454 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -49,6 +49,7 @@
- name: include aws provider credentials
include: aws.yml
static: yes
+ when: not (openshift_node_use_instance_profiles | default(False))
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index 5e5e4f94a..f5428867a 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -8,6 +8,7 @@
- name: Create credentials for registry auth
command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
+ - not (openshift_docker_alternative_creds | default(False))
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
@@ -17,6 +18,24 @@
notify:
- restart node
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host; this is an
+# alternative implementation for non-docker hosts. This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for registry auth (alternative)
+ docker_creds:
+ path: "{{ oreg_auth_credentials_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | bool
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
+ notify:
+ - restart node
+
# Container images may need the registry credentials
- name: Setup ro mount of /root/.docker for containerized hosts
set_fact:
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index 10b4c6977..1da434e6f 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -12,3 +12,4 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
index 5e5e4f94a..f5428867a 100644
--- a/roles/openshift_node_upgrade/tasks/registry_auth.yml
+++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml
@@ -8,6 +8,7 @@
- name: Create credentials for registry auth
command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
+ - not (openshift_docker_alternative_creds | default(False))
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
@@ -17,6 +18,24 @@
notify:
- restart node
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host; this is an
+# alternative implementation for non-docker hosts. This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for registry auth (alternative)
+ docker_creds:
+ path: "{{ oreg_auth_credentials_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | bool
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
+ notify:
+ - restart node
+
# Container images may need the registry credentials
- name: Setup ro mount of /root/.docker for containerized hosts
set_fact:
diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg
index f7e6fe2ff..562f6544d 100644
--- a/utils/etc/ansible.cfg
+++ b/utils/etc/ansible.cfg
@@ -29,6 +29,10 @@ deprecation_warnings = False
# ssh_args - set if provided by user (cli)
# control_path
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
# Additional ssh options for OpenShift Ansible
[ssh_connection]
# shorten the ControlPath which is often too long; when it is,
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 65a481b10..1226242d0 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -25,21 +25,15 @@ QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
UPGRADE_MAPPINGS = {
- '3.4': {
- 'minor_version': '3.4',
- 'minor_playbook': 'v3_4/upgrade.yml',
- 'major_playbook': 'v3_5/upgrade.yml',
- 'major_version': '3.5',
- },
- '3.5': {
- 'minor_version': '3.5',
- 'minor_playbook': 'v3_5/upgrade.yml',
- 'major_playbook': 'v3_6/upgrade.yml',
- 'major_version': '3.6',
- },
'3.6': {
'minor_version': '3.6',
'minor_playbook': 'v3_6/upgrade.yml',
+ 'major_playbook': 'v3_7/upgrade.yml',
+ 'major_version': '3.7',
+ },
+ '3.7': {
+ 'minor_version': '3.7',
+ 'minor_playbook': 'v3_7/upgrade.yml',
},
}