-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  openshift-ansible.spec | 58
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 17
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml | 2
-rw-r--r--  playbooks/byo/openshift-etcd/migrate.yml | 118
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 5
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 9
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 120
-rw-r--r--  playbooks/common/openshift-master/config.yml | 39
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 7
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 1
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 1
-rw-r--r--  roles/etcd_migrate/tasks/check.yml | 2
-rw-r--r--  roles/etcd_migrate/tasks/check_cluster_health.yml | 2
-rw-r--r--  roles/etcd_migrate/tasks/check_cluster_status.yml | 8
-rw-r--r--  roles/etcd_migrate/tasks/migrate.yml | 17
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 59
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 6
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master_docker/master.docker.service.j2 | 2
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 2
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/restart.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 2
-rw-r--r--  roles/openshift_version/tasks/main.yml | 196
-rw-r--r--  test/integration/openshift_health_checker/setup_container.yml | 3
35 files changed, 427 insertions(+), 309 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index a83752c29..65e17d2d9 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.123-1 ./
+3.6.128-1 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 1fcc9990c..97e17412f 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.123.1000
+Version: 3.6.128
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -280,6 +280,62 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Jun 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.128-1
+- parameterize etcd binary path (fabian@fabianism.us)
+- attach leases via the first master only and only once (jchaloup@redhat.com)
+- evalute groups when running etcd upgrade from byo/openshift-
+ cluster/upgrades/upgrade_etcd.yml (jchaloup@redhat.com)
+- Bug 1465168 - mux doesn't recognize ansible boolean parameters correctly
+ (rmeggins@redhat.com)
+
+* Tue Jun 27 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1003-1
+- Generate loopback kubeconfig separately to preserve OpenShift CA certificate.
+ (abutcher@redhat.com)
+- registry: look for the oc executable in /usr/local/bin and ~/bin
+ (gscrivan@redhat.com)
+- router: look for the oc executable in /usr/local/bin and ~/bin
+ (gscrivan@redhat.com)
+- Retry docker startup once (sdodson@redhat.com)
+
+* Tue Jun 27 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1002-1
+- Fix typo in fluentd_secureforward_contents variable
+ (Andreas.Dembach@dg-i.net)
+- Reverting quotation change in ansible_service_broker install for etcd
+ (ewolinet@redhat.com)
+
+* Mon Jun 26 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1001-1
+- oc_atomic_container: use rpm to check the version. (gscrivan@redhat.com)
+- Fix .spec for stagecut (jupierce@redhat.com)
+- Picking change from sdodson (ewolinet@redhat.com)
+- openshift_version: skip nfs and lb hosts (smilner@redhat.com)
+- openshift_checks: eval groups before including role (lmeyer@redhat.com)
+- Adding volume fact for etcd for openshift ansible service broker
+ (ewolinet@redhat.com)
+- Updating to label node and wait for apiservice to be healthy and started
+ (ewolinet@redhat.com)
+- Also configure default registry on HA masters (sdodson@redhat.com)
+- Fix parsing certs with very large serial numbers (tbielawa@redhat.com)
+- fix yamllint issues (fabian@fabianism.us)
+- openshift_logging: use empty default for storage labels (fsimonce@redhat.com)
+- Set clean install and etcd storage on first master to fix scaleup
+ (sdodson@redhat.com)
+- images, syscontainer: change default value for ANSIBLE_CONFIG
+ (gscrivan@redhat.com)
+- Cleanup/updates for env variables and etcd image (fabian@fabianism.us)
+- Sync 3.5 cfme templates over to 3.6 (sdodson@redhat.com)
+- Moving checks down after required initialization happens.
+ (kwoodson@redhat.com)
+- add play and role to install ansible-service-broker (fabian@fabianism.us)
+- Creation of service_catalog and placeholder broker roles
+ (ewolinet@redhat.com)
+- GlusterFS: Use proper namespace for heketi command and service account
+ (jarrpa@redhat.com)
+- Fixing quote issue. (kwoodson@redhat.com)
+- GlusterFS: Fix heketi secret name (jarrpa@redhat.com)
+- Fix for dynamic pvs when using storageclasses. (kwoodson@redhat.com)
+- Ensure that host pki tree is mounted in containerized components
+ (sdodson@redhat.com)
+
* Fri Jun 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.123-1
- releases: enable build/push with multiple tags (lmeyer@redhat.com)
- Update template examples for 3.6 (rteague@redhat.com)
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 9c5948552..acf5469bf 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -3,23 +3,6 @@
tags:
- always
-- name: Verify Requirements
- hosts: OSEv3
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: "install"
- post_tasks:
- - action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_version
- - docker_image_availability
- - docker_storage
-
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
index 8005a17a3..5bd5d64ab 100644
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
@@ -1,4 +1,6 @@
---
- include: ../initialize_groups.yml
+- include: ../../../common/openshift-cluster/evaluate_groups.yml
+
- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml
index fd02e066e..143016159 100644
--- a/playbooks/byo/openshift-etcd/migrate.yml
+++ b/playbooks/byo/openshift-etcd/migrate.yml
@@ -3,122 +3,6 @@
tags:
- always
-- include: ../../common/openshift-cluster/evaluate_groups.yml
+- include: ../../common/openshift-etcd/migrate.yml
tags:
- always
-
-- name: Run pre-checks
- hosts: oo_etcd_to_config
- tags:
- - always
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: check
- etcd_peer: "{{ ansible_default_ipv4.address }}"
-
-# TODO(jchaloup): replace the std_include with something minimal so the entire playbook is faster
-# e.g. I don't need to detect the OCP version, install deps, etc.
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- name: Backup v2 data
- hosts: oo_etcd_to_config
- gather_facts: no
- tags:
- - always
- roles:
- - role: openshift_facts
- - role: etcd_common
- r_etcd_common_action: backup
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_config)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_to_config | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when:
- - etcd_backup_failed | length > 0
-
-- name: Prepare masters for etcd data migration
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master' }}"
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
- when:
- - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
- - debug:
- msg: "master service name: {{ master_services }}"
- - name: Stop masters
- service:
- name: "{{ item }}"
- state: stopped
- with_items: "{{ master_services }}"
-
-- name: Migrate etcd data from v2 to v3
- hosts: oo_etcd_to_config
- gather_facts: no
- tags:
- - always
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: migrate
- etcd_peer: "{{ ansible_default_ipv4.address }}"
-
-- name: Gate on etcd migration
- hosts: oo_masters_to_config
- gather_facts: no
- tasks:
- - set_fact:
- etcd_migration_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_config)
- | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
- - set_fact:
- etcd_migration_failed: "{{ groups.oo_etcd_to_config | difference(etcd_migration_completed) }}"
-
-- name: Configure masters if etcd data migration is succesfull
- hosts: oo_masters_to_config
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: configure
- when: etcd_migration_failed | length == 0
- tasks:
- - debug:
- msg: "Skipping master re-configuration since migration failed."
- when:
- - etcd_migration_failed | length > 0
-
-- name: Start masters after etcd data migration
- hosts: oo_masters_to_config
- tasks:
- - name: Start master services
- service:
- name: "{{ item }}"
- state: started
- register: service_status
- # Sometimes the master-api, resp. master-controllers fails to start for the first time
- until: service_status.state is defined and service_status.state == "started"
- retries: 5
- delay: 10
- with_items: "{{ master_services[::-1] }}"
- - fail:
- msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
- when:
- - etcd_migration_failed | length > 0
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 1bee460e8..c7766ff04 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,4 +1,9 @@
---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
- name: Run OpenShift health checks
hosts: OSEv3
roles:
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index e01c6f38d..7ca9f7e8b 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,4 +1,9 @@
---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
- hosts: OSEv3
name: run OpenShift pre-install checks
roles:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 2a75b8dc3..7224ae712 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,4 +1,23 @@
---
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "install"
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
- include: initialize_oo_option_facts.yml
tags:
- always
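
The Verify Requirements play has moved from the BYO entry point into the common cluster config playbook and now runs against oo_all_hosts, so every install path gets the same pre-flight checks. As a rough sketch of how the same action plugin can be driven with a narrower list of checks (the play below is illustrative, not part of this change; the check names come from the list above):

- name: Run a subset of the pre-flight checks
  hosts: oo_all_hosts
  roles:
    - openshift_health_checker
  vars:
    - r_openshift_health_checker_playbook_context: "install"
  post_tasks:
    - action: openshift_health_check
      args:
        checks:
          # only the docker-related checks from the full list above
          - docker_image_availability
          - docker_storage
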
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c28ce4c14..baca72c58 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -157,3 +157,12 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
+
+ - name: Evaluate oo_etcd_to_migrate
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_migrate
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
+ changed_when: no
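
The new oo_etcd_to_migrate group evaluates to the dedicated etcd hosts when any are defined and otherwise falls back to the first master, where embedded etcd runs. Later plays can therefore target a single group name regardless of topology; a minimal illustrative sketch:

- name: Act on whichever hosts hold the etcd data
  hosts: oo_etcd_to_migrate
  gather_facts: no
  tasks:
    # placeholder task; the real migration plays live in common/openshift-etcd/migrate.yml
    - debug:
        msg: "{{ inventory_hostname }} selected for etcd v2 to v3 migration"
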
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..c655449fa
--- /dev/null
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -0,0 +1,120 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
+- name: Run pre-checks
+ hosts: oo_etcd_to_migrate
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: check
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- include: ../openshift-cluster/initialize_facts.yml
+ tags:
+ - always
+
+- name: Backup v2 data
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: openshift_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when:
+ - etcd_backup_failed | length > 0
+
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master' }}"
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ when:
+ - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
+
+- name: Migrate etcd data from v2 to v3
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: migrate
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- name: Gate on etcd migration
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ etcd_migration_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ - set_fact:
+ etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+
+- name: Configure masters if etcd data migration is succesfull
+ hosts: oo_masters_to_config
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: configure
+ when: etcd_migration_failed | length == 0
+ tasks:
+ - debug:
+ msg: "Skipping master re-configuration since migration failed."
+ when:
+ - etcd_migration_failed | length > 0
+
+- name: Start masters after etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Start master services
+ service:
+ name: "{{ item }}"
+ state: started
+ register: service_status
+ # Sometimes the master-api, resp. master-controllers fails to start for the first time
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
+ with_items: "{{ master_services[::-1] }}"
+ - fail:
+ msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
+ when:
+ - etcd_migration_failed | length > 0
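
Both gates above follow the same pattern: collect the hosts that reported a success fact, subtract them from the target group, and fail if anything is left over. The playbook uses this repository's oo_select_keys and oo_collect filter plugins; a rough equivalent using only core Jinja filters looks like the sketch below (illustrative only, not part of the diff):

- name: Gate on a per-host success fact (sketch)
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - set_fact:
        hosts_completed: "{{ groups['oo_etcd_to_migrate']
                             | map('extract', hostvars)
                             | selectattr('r_etcd_migrate_success', 'defined')
                             | selectattr('r_etcd_migrate_success')
                             | map(attribute='inventory_hostname')
                             | list }}"
    - fail:
        msg: "Not migrated: {{ groups['oo_etcd_to_migrate'] | difference(hosts_completed) | join(',') }}"
      when: groups['oo_etcd_to_migrate'] | difference(hosts_completed) | length > 0
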
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 70108fb7a..7d3a371e3 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -20,25 +20,6 @@
- node
- .config_managed
- - name: Check for existing configuration
- stat:
- path: /etc/origin/master/master-config.yaml
- register: master_config_stat
-
- - name: Set clean install fact
- set_fact:
- l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
-
- - name: Determine if etcd3 storage is in use
- command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
- register: etcd3_grep
- failed_when: false
- changed_when: false
-
- - name: Set etcd3 fact
- set_fact:
- l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
-
- set_fact:
openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
when: openshift_master_pod_eviction_timeout is not defined
@@ -88,7 +69,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Determine if session secrets must be generated
+- name: Inspect state of first master session secrets and config
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -98,6 +79,24 @@
local_facts:
session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ - name: Check for existing configuration
+ stat:
+ path: /etc/origin/master/master-config.yaml
+ register: master_config_stat
+
+ - name: Set clean install fact
+ set_fact:
+ l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+ - name: Determine if etcd3 storage is in use
+ command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+ register: etcd3_grep
+ failed_when: false
+ changed_when: false
+
+ - name: Set etcd3 fact
+ set_fact:
+ l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
- name: Generate master session secrets
hosts: oo_first_master
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index b48583fd4..81c3f8e5b 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -14,6 +14,7 @@
ansible_service_broker_etcd_image_prefix: "{{ ansible_service_broker_etcd_image_prefix | default(__ansible_service_broker_etcd_image_prefix) }}"
ansible_service_broker_etcd_image_tag: "{{ ansible_service_broker_etcd_image_tag | default(__ansible_service_broker_etcd_image_tag) }}"
+ ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}"
ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}"
ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}"
@@ -144,10 +145,10 @@
terminationMessagePath: /tmp/termination-log
workingDir: /etcd
args:
- - /usr/local/bin/etcd
+ - '{{ ansible_service_broker_etcd_image_etcd_path }}'
- --data-dir=/data
- - --listen-client-urls="http://0.0.0.0:2379"
- - --advertise-client-urls="http://0.0.0.0:2379"
+ - "--listen-client-urls=http://0.0.0.0:2379"
+ - "--advertise-client-urls=http://0.0.0.0:2379"
ports:
- containerPort: 2379
protocol: TCP
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index b0b3835e3..15e448515 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -5,6 +5,7 @@ __ansible_service_broker_image_tag: latest
__ansible_service_broker_etcd_image_prefix: quay.io/coreos/
__ansible_service_broker_etcd_image_tag: latest
+__ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd
__ansible_service_broker_registry_type: dockerhub
__ansible_service_broker_registry_url: null
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index a6d999647..19b4a5147 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -5,6 +5,7 @@ __ansible_service_broker_image_tag: latest
__ansible_service_broker_etcd_image_prefix: rhel7/
__ansible_service_broker_etcd_image_tag: latest
+__ansible_service_broker_etcd_image_etcd_path: /bin/etcd
__ansible_service_broker_registry_type: rhcc
__ansible_service_broker_registry_url: "https://registry.access.redhat.com"
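
The path to the etcd binary inside the broker's etcd container is now parameterized, defaulting to /usr/local/bin/etcd for the quay.io/coreos image and /bin/etcd for the rhel7 image. Deployments that use a different etcd image can override the variable; a hedged sketch (host group and path are illustrative):

- hosts: oo_first_master
  roles:
    - role: ansible_service_broker
      # illustrative override for a custom etcd image
      ansible_service_broker_etcd_image_etcd_path: /opt/etcd/etcd
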
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml
index 2f07713bc..800073873 100644
--- a/roles/etcd_migrate/tasks/check.yml
+++ b/roles/etcd_migrate/tasks/check.yml
@@ -6,7 +6,7 @@
# Run the migration only if the data are v2
- name: Check if there are any v3 data
command: >
- etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:2379' get "" --from-key --keys-only -w json --limit 1
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:{{ etcd_client_port }}' get "" --from-key --keys-only -w json --limit 1
environment:
ETCDCTL_API: 3
register: l_etcdctl_output
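
With ETCDCTL_API=3, get "" --from-key --keys-only --limit 1 returns at least one key whenever v3 data already exists, which is how the role decides whether a member still needs migration. The same probe can be run standalone; a sketch using the role's usual certificate paths (paths, port, and the kvs-based check are illustrative):

- name: Probe an etcd member for existing v3 keys (sketch)
  hosts: oo_etcd_to_migrate
  tasks:
    - command: >
        etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt
        --endpoints 'https://{{ ansible_default_ipv4.address }}:2379'
        get "" --from-key --keys-only -w json --limit 1
      environment:
        ETCDCTL_API: 3
      register: v3_probe
      changed_when: false
    - debug:
        msg: "v3 keys present: {{ ((v3_probe.stdout | from_json).kvs | default([])) | length > 0 }}"
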
diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd_migrate/tasks/check_cluster_health.yml
index 1abd6a32f..201d83f99 100644
--- a/roles/etcd_migrate/tasks/check_cluster_health.yml
+++ b/roles/etcd_migrate/tasks/check_cluster_health.yml
@@ -1,7 +1,7 @@
---
- name: Check cluster health
command: >
- etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://{{ etcd_peer }}:2379 cluster-health
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
register: etcd_cluster_health
changed_when: false
failed_when: false
diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd_migrate/tasks/check_cluster_status.yml
index 90fe385c1..b69fb5a52 100644
--- a/roles/etcd_migrate/tasks/check_cluster_status.yml
+++ b/roles/etcd_migrate/tasks/check_cluster_status.yml
@@ -2,7 +2,7 @@
# etcd_ip originates from etcd_common role
- name: Check cluster status
command: >
- etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints 'https://{{ etcd_peer }}:2379' -w json endpoint status
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:{{ etcd_client_port }}' -w json endpoint status
environment:
ETCDCTL_API: 3
register: l_etcd_cluster_status
@@ -15,7 +15,7 @@
# http://docs.ansible.com/ansible/playbooks_filters.html#extracting-values-from-containers
- name: Group all raftIndices into a list
set_fact:
- etcd_members_raft_indices: "{{ groups['oo_etcd_to_config'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}"
+ etcd_members_raft_indices: "{{ groups['oo_etcd_to_migrate'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}"
- name: Check the minimum and the maximum of raftIndices is at most 1
set_fact:
@@ -24,9 +24,9 @@
- debug:
msg: "Raft indices difference: {{ etcd_members_raft_indices_diff }}"
- when: inventory_hostname in groups.oo_etcd_to_config[0]
+ when: inventory_hostname in groups.oo_etcd_to_migrate[0]
# The cluster raft status is ok if the difference of the max and min raft index is at most 1
- name: capture the status
set_fact:
- l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_config[0]]['etcd_members_raft_indices_diff'] | int < 2 }}"
+ l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_migrate[0]]['etcd_members_raft_indices_diff'] | int < 2 }}"
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd_migrate/tasks/migrate.yml
index cb479b0cc..27eb945aa 100644
--- a/roles/etcd_migrate/tasks/migrate.yml
+++ b/roles/etcd_migrate/tasks/migrate.yml
@@ -20,10 +20,12 @@
- name: Check the etcd v2 data are correctly migrated
fail:
msg: "Failed to migrate a member"
- when: "'finished transforming keys' not in l_etcdctl_migrate.stdout"
+ when: "'finished transforming keys' not in l_etcdctl_migrate.stdout and 'no v2 keys to migrate' not in l_etcdctl_migrate.stdout"
+
+- name: Migration message
+ debug:
+ msg: "Etcd migration finished with: {{ l_etcdctl_migrate.stdout }}"
-# TODO(jchaloup): start the etcd on a different port so noone can access it
-# Once the validation is done
- name: Enable etcd member
service:
name: "{{ l_etcd_service }}"
@@ -35,7 +37,7 @@
--cert {{ etcd_peer_cert_file }} \
--key {{ etcd_peer_key_file }} \
--cacert {{ etcd_peer_ca_file }} \
- --etcd-address 'https://{{ etcd_peer }}:2379' \
+ --etcd-address 'https://{{ etcd_peer }}:{{ etcd_client_port }}' \
--ttl-keys-prefix {{ item }} \
--lease-duration 1h
environment:
@@ -43,11 +45,8 @@
with_items:
- "/kubernetes.io/events"
- "/kubernetes.io/masterleases"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+ run_once: true
- set_fact:
r_etcd_migrate_success: true
-
-- name: Enable etcd member
- service:
- name: "{{ l_etcd_service }}"
- state: started
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index b9a7ec32f..419679bc2 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -117,25 +117,46 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Generate the loopback master client config
- command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
- --certificate-authority {{ named_ca_certificate }}
- {% endfor %}
- --certificate-authority={{ openshift_ca_cert }}
- --client-dir={{ openshift_ca_config_dir }}
- --groups=system:masters,system:openshift-master
- --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
- --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
- --signer-cert={{ openshift_ca_cert }}
- --signer-key={{ openshift_ca_key }}
- --signer-serial={{ openshift_ca_serial }}
- --user=system:openshift-master
- --basename=openshift-master
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
- --expire-days={{ openshift_master_cert_expire_days }}
- {% endif %}
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate. Generate the loopback
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
+- block:
+ - name: Create temp directory for loopback master client config
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: openshift_ca_loopback_tmpdir
+ - name: Generate the loopback master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority={{ openshift_ca_cert }}
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --client-dir={{ openshift_ca_loopback_tmpdir.stdout }}
+ --groups=system:masters,system:openshift-master
+ --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
+ - name: Copy generated loopback master client config to master config dir
+ copy:
+ src: "{{ openshift_ca_loopback_tmpdir.stdout }}/{{ item }}"
+ dest: "{{ openshift_ca_config_dir }}"
+ remote_src: true
+ with_items:
+ - openshift-master.crt
+ - openshift-master.key
+ - openshift-master.kubeconfig
+ - name: Delete temp directory
+ file:
+ name: "{{ openshift_ca_loopback_tmpdir.stdout }}"
+ state: absent
when: loopback_context_string not in loopback_config.stdout
delegate_to: "{{ openshift_ca_host }}"
run_once: true
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index d895e9a68..2eeb2e7ce 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -135,7 +135,7 @@
- name: Determine the latest version of the OpenShift registry deployment
command: |
- oc get deploymentconfig {{ openshift_hosted_registry_name }} \
+ {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
--namespace {{ openshift_hosted_registry_namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .status.latestVersion }'
@@ -143,7 +143,7 @@
- name: Sanity-check that the OpenShift registry rolled out correctly
command: |
- oc get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
+ {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
--namespace {{ openshift_hosted_registry_namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 160ae2f5e..c60b67862 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -92,7 +92,7 @@
- name: Ensure OpenShift router correctly rolls out (best-effort today)
command: |
- oc rollout status deploymentconfig {{ item.name }} \
+ {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
--namespace {{ item.namespace | default('default') }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig
async: 600
@@ -102,7 +102,7 @@
- name: Determine the latest version of the OpenShift router deployment
command: |
- oc get deploymentconfig {{ item.name }} \
+ {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
--namespace {{ item.namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .status.latestVersion }'
@@ -111,7 +111,7 @@
- name: Poll for OpenShift router deployment success
command: |
- oc get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
--namespace {{ item.0.namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 66d880d23..9b7767ccd 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -89,7 +89,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}"
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 8194223e8..30b596e22 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -100,7 +100,7 @@
- copy:
src: secure-forward.conf
dest: "{{ tempdir }}/secure-forward.conf"
- when: fluentd_securefoward_contents is undefined
+ when: fluentd_secureforward_contents is undefined
changed_when: no
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 243698c6a..bd392c194 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -102,7 +102,7 @@ spec:
- name: USE_MUX
value: "true"
- name: MUX_ALLOW_EXTERNAL
- value: "{{ openshift_logging_mux_allow_external | default('false') }}"
+ value: "{{ openshift_logging_mux_allow_external | default('false') | lower }}"
- name: "BUFFER_QUEUE_LIMIT"
value: "{{ openshift_logging_mux_buffer_queue_limit }}"
- name: "BUFFER_SIZE_LIMIT"
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 897ee7285..e8f7c47b0 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,7 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 451f3436a..69db62f16 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,7 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index 7f40cb042..31c1dfc33 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -8,7 +8,7 @@ Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 62413536b..d9ffb1b6f 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -74,10 +74,10 @@
- name: Generate the loopback master client config
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority={{ openshift_ca_cert }}
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --certificate-authority={{ openshift_ca_cert }}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
--groups=system:masters,system:openshift-master
--master={{ hostvars[item].openshift.master.loopback_api_url }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index d89b64b06..cd0a1a60b 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -17,7 +17,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 508eb9358..b24d8cec7 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -16,7 +16,11 @@
- name: Restart docker
service:
name: "{{ openshift.docker.service_name }}"
- state: restarted
+ state: started
+ register: docker_start_result
+ until: not docker_start_result | failed
+ retries: 1
+ delay: 30
- name: Update docker facts
openshift_facts:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 4406ef28b..af901103e 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -26,7 +26,7 @@
- kind: "sa"
name: "heketi-{{ glusterfs_name }}-service-account"
- kind: "secret"
- name: "heketi-{{ glusterfs_name }}-user-secret"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
failed_when: False
when: glusterfs_heketi_wipe
@@ -66,6 +66,7 @@
- name: Add heketi service account to privileged SCC
oc_adm_policy_user:
+ namespace: "{{ glusterfs_namespace }}"
user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: scc
resource_name: privileged
@@ -74,6 +75,7 @@
- name: Allow heketi service account to view/edit pods
oc_adm_policy_user:
+ namespace: "{{ glusterfs_namespace }}"
user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: role
resource_name: edit
@@ -148,7 +150,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
@@ -170,7 +172,7 @@
oc_secret:
namespace: "{{ glusterfs_namespace }}"
state: present
- name: "heketi-{{ glusterfs_name }}-secret"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
type: "kubernetes.io/glusterfs"
force: True
contents:
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 26343b909..63009c539 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
register: setup_storage
- name: Copy heketi-storage list
- shell: "{{ openshift.common.client_binary }} rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+ shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -125,7 +125,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 5ea801e60..2ec9a9e9a 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -8,4 +8,4 @@ parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
restuser: "admin"
secretNamespace: "{{ glusterfs_namespace }}"
- secretName: "heketi-{{ glusterfs_name }}-secret"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 16792388f..f4cb8ddb2 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -84,115 +84,119 @@
- openshift_version is not defined
- openshift_protect_installed_version | bool
-- name: Set openshift_version for rpm installation
- include: set_version_rpm.yml
- when: not is_containerized | bool
-
-- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
- when: is_containerized | bool
-
-- block:
- - name: Get available {{ openshift.common.service_type}} version
- repoquery:
- name: "{{ openshift.common.service_type}}"
- ignore_excluders: true
- register: rpm_results
- - fail:
- msg: "Package {{ openshift.common.service_type}} not found"
- when: not rpm_results.results.package_found
- - set_fact:
- openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
- - name: Fail if rpm version and docker image version are different
- fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
- # Both versions have the same string representation
+# The rest of these tasks should only execute on
+# masters and nodes as we can verify they have subscriptions
+- when:
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ block:
+ - name: Set openshift_version for rpm installation
+ include: set_version_rpm.yml
+ when: not is_containerized | bool
+
+ - name: Set openshift_version for containerized installation
+ include: set_version_containerized.yml
+ when: is_containerized | bool
+
+ - block:
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
+ - set_fact:
+ openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ # Both versions have the same string representation
+ when:
+ - openshift_rpm_version != openshift_version
+ # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ
+ - openshift_pkg_version is not defined
+ - openshift_image_tag is not defined
when:
- - openshift_rpm_version != openshift_version
- # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ
- - openshift_pkg_version is not defined
- - openshift_image_tag is not defined
- when:
- - is_containerized | bool
- - not is_atomic | bool
-
-# Warn if the user has provided an openshift_image_tag but is not doing a containerized install
-# NOTE: This will need to be modified/removed for future container + rpm installations work.
-- name: Warn if openshift_image_tag is defined when not doing a containerized install
- debug:
- msg: >
- openshift_image_tag is used for containerized installs. If you are trying to
- specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
- when:
- - not is_containerized | bool
- - openshift_image_tag is defined
-
+ - is_containerized | bool
+ - not is_atomic | bool
+
+ # Warn if the user has provided an openshift_image_tag but is not doing a containerized install
+ # NOTE: This will need to be modified/removed for future container + rpm installations work.
+ - name: Warn if openshift_image_tag is defined when not doing a containerized install
+ debug:
+ msg: >
+ openshift_image_tag is used for containerized installs. If you are trying to
+ specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
+ when:
+ - not is_containerized | bool
+ - openshift_image_tag is defined
-# At this point we know openshift_version is set appropriately. Now we set
-# openshift_image_tag and openshift_pkg_version, so all roles can always assume
-# each of this variables *will* be set correctly and can use them per their
-# intended purpose.
+ # At this point we know openshift_version is set appropriately. Now we set
+ # openshift_image_tag and openshift_pkg_version, so all roles can always assume
+ # each of this variables *will* be set correctly and can use them per their
+ # intended purpose.
-- block:
- - debug:
- msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+ - block:
+ - debug:
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
- - set_fact:
- openshift_image_tag: v{{ openshift_version }}
+ - set_fact:
+ openshift_image_tag: v{{ openshift_version }}
- when: openshift_image_tag is not defined
+ when: openshift_image_tag is not defined
-- block:
- - debug:
- msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
+ - block:
+ - debug:
+ msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
- - set_fact:
- openshift_pkg_version: -{{ openshift_version }}
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
+ when: openshift_pkg_version is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_version
- name: Abort if openshift_version was not set
- when: openshift_version is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
+ when: openshift_version is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_image_tag
- name: Abort if openshift_image_tag was not set
- when: openshift_image_tag is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
+ when: openshift_image_tag is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_pkg_version
- name: Abort if openshift_pkg_version was not set
- when: openshift_pkg_version is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
+ when: openshift_pkg_version is not defined
-- fail:
- msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
- name: Abort if openshift_pkg_version was not set
- when:
- - not is_containerized | bool
- - openshift_version == '0.0'
+ - fail:
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if openshift_pkg_version was not set
+ when:
+ - not is_containerized | bool
+ - openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers; make sure
-# the rpm version we looked up matches the release requested and error out if not.
-- name: For an RPM install, abort when the release requested does not match the available version.
- when:
- - not is_containerized | bool
- - openshift_release is defined
- assert:
- that:
- - openshift_version.startswith(openshift_release) | bool
- msg: |-
- You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
- on host {{ inventory_hostname }}.
- We will only install the latest RPMs, so please ensure you are getting the release
- you expect. You may need to adjust your Ansible inventory, modify the repositories
- available on the host, or run the appropriate OpenShift upgrade playbook.
+ # We can't map an openshift_release to full rpm version like we can with containers; make sure
+ # the rpm version we looked up matches the release requested and error out if not.
+ - name: For an RPM install, abort when the release requested does not match the available version.
+ when:
+ - not is_containerized | bool
+ - openshift_release is defined
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
-# The end result of these three variables is quite important so make sure they are displayed and logged:
-- debug: var=openshift_release
+ # The end result of these three variables is quite important so make sure they are displayed and logged:
+ - debug: var=openshift_release
-- debug: var=openshift_image_tag
+ - debug: var=openshift_image_tag
-- debug: var=openshift_pkg_version
+ - debug: var=openshift_pkg_version
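
The version detection logic is now wrapped in a single block whose when: condition applies to every task inside it, so the RPM and image version lookups only run on hosts in oo_masters_to_config or oo_nodes_to_config; dedicated lb and nfs hosts, which may lack the needed subscriptions, skip it entirely. A minimal sketch of the block-level conditional pattern as used in a role tasks file (the task body is illustrative):

# role tasks file (sketch)
- when:
    - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
  block:
    - name: Placeholder for version detection
      debug:
        msg: "runs only on masters and nodes"
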
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
index 8793d954e..33e94cf1f 100644
--- a/test/integration/openshift_health_checker/setup_container.yml
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -43,3 +43,6 @@
delegate_facts: True
delegate_to: "{{ container_name }}"
with_dict: "{{ l_host_vars | default({}) }}"
+
+- include: ../../../playbooks/byo/openshift-cluster/initialize_groups.yml
+- include: ../../../playbooks/common/openshift-cluster/evaluate_groups.yml