-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 4
-rw-r--r--  inventory/byo/hosts.ose.example | 4
-rw-r--r--  openshift-ansible.spec | 31
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-prometheus.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 1
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 2
-rw-r--r--  playbooks/common/openshift-master/set_network_facts.yml | 8
-rw-r--r--  roles/docker/tasks/main.yml | 20
-rw-r--r--  roles/docker/tasks/package_docker.yml | 14
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 13
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 11
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 2
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 2
-rw-r--r--  roles/openshift_gcp/defaults/main.yml | 58
-rw-r--r--  roles/openshift_gcp/templates/dns.j2.sh | 8
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 228
-rw-r--r--  roles/openshift_gcp/templates/remove.j2.sh | 64
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 11
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 1
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 1
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_node/tasks/config.yml | 16
-rw-r--r--  roles/openshift_node/tasks/main.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 2
55 files changed, 365 insertions, 217 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 1541b6a5f..d732f0388 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.138.0 ./
+3.7.0-0.142.0 ./
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 4a0630a69..c54d6bca7 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -425,6 +425,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# path using these options would be "/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+# nfs_directory must conform to DNS-1123 subdomain must consist of lower case
+# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
@@ -437,6 +439,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=nfs.example.com
+# nfs_directory must conform to DNS-1123 subdomain must consist of lower case
+# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 03fbcc63c..7c637b3cb 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -432,6 +432,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# path using these options would be "/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+# nfs_directory must conform to DNS-1123 subdomain must consist of lower case
+# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
@@ -444,6 +446,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=nfs.example.com
+# nfs_directory must conform to DNS-1123 subdomain must consist of lower case
+# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
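Note: the comments added in both inventory examples say that openshift_hosted_registry_storage_nfs_directory must follow the DNS-1123 subdomain rule. A minimal, illustrative Python check of that rule (the regex and helper name are not part of this repository):

import re

# Illustrative check for the DNS-1123 subdomain rule quoted above: lower
# case alphanumerics, '-' or '.', starting and ending with an alphanumeric.
DNS1123_SUBDOMAIN = re.compile(r'^[a-z0-9]([a-z0-9.-]*[a-z0-9])?$')

def conforms_to_dns1123(value):
    return len(value) <= 253 and bool(DNS1123_SUBDOMAIN.match(value))

print(conforms_to_dns1123('exports'))      # True  (basename of the default /exports)
print(conforms_to_dns1123('Exports_Dir'))  # False (uppercase and underscore)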
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index b2db44b86..15d74a682 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.138.0%{?dist}
+Release: 0.142.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -276,6 +276,35 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.142.0
+- Document that nfs_directory must conform to DNS-1123 (sdodson@redhat.com)
+- Move node aws credentials to config.yml (mgugino@redhat.com)
+- Use etcd_ip when communicating with the cluster as a peer in etcd scaleup.
+ (abutcher@redhat.com)
+- Ensure openshift.common.portal_net updated during scaleup.
+ (abutcher@redhat.com)
+- docker: fix some tox warnings (gscrivan@redhat.com)
+- Require openshift_image_tag in the inventory with openshift-enterprise
+ (gscrivan@redhat.com)
+- crio: use the image_tag on RHEL (gscrivan@redhat.com)
+- docker: use the image_tag on RHEL (gscrivan@redhat.com)
+
+* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.141.0
+- Restore registires to /etc/sysconfig/docker (mgugino@redhat.com)
+- Fix Prometheus byo entry point (rteague@redhat.com)
+- Update to the openshift_aws style scheme for variables (ccoleman@redhat.com)
+
+* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.140.0
+- openshift_checks: Fix incorrect list cast (smilner@redhat.com)
+- lib/base: Allow for empty option value (jarrpa@redhat.com)
+
+* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.139.0
+- Fix mispelling in error message yammlint -> yamllint (simo@redhat.com)
+- Separate certificate playbooks. (abutcher@redhat.com)
+- Reverting using uninstall variables for logging and metrics
+ (ewolinet@redhat.com)
+- Add --image flag to setup-openshift-heketi-storage (ttindell@isenpai.com)
+
* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.138.0
- Fix typo in openshift_default_storage_class/README (hansmi@vshn.ch)
- GlusterFS: make ServiceAccounts privileged when either glusterfs or heketi is
diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
index 15917078d..4d3f7f42c 100644
--- a/playbooks/byo/openshift-cluster/openshift-prometheus.yml
+++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
@@ -1,4 +1,6 @@
---
- include: initialize_groups.yml
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-cluster/openshift_prometheus.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index b9eb380d3..32e5e708a 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -23,6 +23,7 @@
- include: cockpit-ui.yml
- include: openshift_prometheus.yml
+ when: openshift_hosted_prometheus_deploy | default(False) | bool
- name: Hosted Install Checkpoint End
hosts: localhost
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
index ed89d3bde..ac2d250a3 100644
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -3,4 +3,3 @@
hosts: oo_first_master
roles:
- role: openshift_prometheus
- when: openshift_hosted_prometheus_deploy | default(False) | bool
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 8aa508119..b5ba2bbba 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -20,7 +20,7 @@
/usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
--key-file {{ etcd_peer_key_file }}
--ca-file {{ etcd_peer_ca_file }}
- -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_ip }}:{{ etcd_client_port }}
member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
delegate_to: "{{ etcd_ca_host }}"
failed_when:
diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml
index 2ad805858..9a6cf26fc 100644
--- a/playbooks/common/openshift-master/set_network_facts.yml
+++ b/playbooks/common/openshift-master/set_network_facts.yml
@@ -13,7 +13,9 @@
- name: Set network facts for masters
hosts: oo_masters_to_config
gather_facts: no
- tasks:
+ roles:
+ - role: openshift_facts
+ post_tasks:
- block:
- set_fact:
osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
@@ -24,5 +26,9 @@
- set_fact:
openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
when: openshift_portal_net is not defined
+ - openshift_facts:
+ role: common
+ local_facts:
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
when:
- hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
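Note: the new openshift_facts task records portal_net from whichever variable is defined, falling back through the Jinja default chain. A minimal sketch of that fallback order (illustrative only, not repository code):

# openshift_portal_net | default(openshift_master_portal_net) | default(None)
def resolve_portal_net(hostvars):
    for key in ('openshift_portal_net', 'openshift_master_portal_net'):
        if key in hostvars:
            return hostvars[key]
    return None

print(resolve_portal_net({'openshift_master_portal_net': '172.30.0.0/16'}))  # 172.30.0.0/16
print(resolve_portal_net({}))                                                # None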
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 7ece0e061..dae17c3ce 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -10,20 +10,28 @@
l_use_crio: "{{ openshift_use_crio | default(False) }}"
l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"
+- when:
+ - openshift_deployment_type == 'openshift-enterprise'
+ assert:
+ that:
+ - "openshift_image_tag is defined"
+ msg: >
+ openshift_image_tag is a required inventory variable when installing openshift-enterprise
+
- name: Use Package Docker if Requested
include: package_docker.yml
when:
- - not l_use_system_container
- - not l_use_crio_only
+ - not l_use_system_container
+ - not l_use_crio_only
- name: Use System Container Docker if Requested
include: systemcontainer_docker.yml
when:
- - l_use_system_container
- - not l_use_crio_only
+ - l_use_system_container
+ - not l_use_crio_only
- name: Add CRI-O usage Requested
include: systemcontainer_crio.yml
when:
- - l_use_crio
- - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ - l_use_crio
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 3e81d5c8e..a35335937 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -61,15 +61,25 @@
- stat: path=/etc/sysconfig/docker
register: docker_check
-- name: Comment old registry params in /etc/sysconfig/docker
+- name: Set registry params
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}"
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
+ when:
+ - item.reg_fact_val != []
+ - docker_check.stat.isreg is defined
+ - docker_check.stat.isreg
with_items:
- reg_conf_var: ADD_REGISTRY
+ reg_fact_val: "{{ l2_docker_additional_registries }}"
+ reg_flag: --add-registry
- reg_conf_var: BLOCK_REGISTRY
+ reg_fact_val: "{{ l2_docker_blocked_registries }}"
+ reg_flag: --block-registry
- reg_conf_var: INSECURE_REGISTRY
+ reg_fact_val: "{{ l2_docker_insecure_registries }}"
+ reg_flag: --insecure-registry
notify:
- restart docker
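Note: the "Set registry params" task writes each registry list back into /etc/sysconfig/docker, putting the matching flag in front of every entry. A short Python sketch of how the lineinfile 'line' is assembled; the behaviour of the oo_prepend_strings_in_list filter is assumed from its use here (prefix every list item with the given string):

def oo_prepend_strings_in_list(items, prefix):
    return [prefix + item for item in items]

l2_docker_additional_registries = ['registry.access.redhat.com', 'registry.example.com:5000']
line = "ADD_REGISTRY='{}'".format(
    ' '.join(oo_prepend_strings_in_list(l2_docker_additional_registries, '--add-registry ')))
print(line)
# ADD_REGISTRY='--add-registry registry.access.redhat.com --add-registry registry.example.com:5000'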
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 66ce475e1..386369d26 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -1,4 +1,5 @@
---
+
# TODO: Much of this file is shared with container engine tasks
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
@@ -92,16 +93,24 @@
- block:
- - name: Set to default prepend
+ - name: Set CRI-O image defaults
set_fact:
l_crio_image_prepend: "docker.io/gscrivano"
l_crio_image_name: "cri-o-fedora"
+ l_crio_image_tag: "latest"
- name: Use Centos based image when distribution is CentOS
set_fact:
l_crio_image_name: "cri-o-centos"
when: ansible_distribution == "CentOS"
+ - name: Set CRI-O image tag
+ set_fact:
+ l_crio_image_tag: "{{ openshift_image_tag }}"
+ when:
+ - openshift_deployment_type == 'openshift-enterprise'
+ - openshift_image_tag is defined
+
- name: Use RHEL based image when distribution is Red Hat
set_fact:
l_crio_image_prepend: "registry.access.redhat.com/openshift3"
@@ -110,7 +119,7 @@
- name: Set the full image name
set_fact:
- l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest"
+ l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
# For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
- name: Use a specific image if requested
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 8b43393cb..5b1605b58 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -1,4 +1,5 @@
---
+
# If docker_options are provided we should fail. We should not install docker and ignore
# the users configuration. NOTE: docker_options == inventory:openshift_docker_options
- name: Fail quickly if openshift_docker_options are set
@@ -89,6 +90,14 @@
- name: Set to default prepend
set_fact:
l_docker_image_prepend: "gscrivano"
+ l_docker_image_tag: "latest"
+
+ - name: Set container engine image tag
+ set_fact:
+ l_docker_image_tag: "{{ openshift_image_tag }}"
+ when:
+ - openshift_deployment_type == 'openshift-enterprise'
+ - openshift_image_tag is defined
- name: Use Red Hat Registry for image when distribution is Red Hat
set_fact:
@@ -102,7 +111,7 @@
- name: Set the full image name
set_fact:
- l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+ l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}"
# For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
- name: Use a specific image if requested
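Note: both the CRI-O and the container-engine system container tasks now take the image tag from openshift_image_tag on openshift-enterprise deployments and otherwise keep "latest". A small sketch of that selection (names are illustrative):

def select_image_tag(openshift_deployment_type, openshift_image_tag=None):
    if openshift_deployment_type == 'openshift-enterprise' and openshift_image_tag is not None:
        return openshift_image_tag
    return 'latest'

print(select_image_tag('origin'))                          # latest
print(select_image_tag('openshift-enterprise', 'v3.7.0'))  # v3.7.0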
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 1e6eb2386..05b2763d5 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1421,7 +1421,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index 8c6a81cc8..d1dc4caf8 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -1399,7 +1399,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 4a7847e88..152f270ab 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index b8af5cad9..3082f5890 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 3364f8de3..1ceaf5d0d 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index c64d7ffd2..5d6fa1f80 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1511,7 +1511,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 492494bda..ffea14766 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1536,7 +1536,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index b412ca8af..9761b4b4e 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1385,7 +1385,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 8bbc22c49..047edffbb 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1391,7 +1391,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index ad17051cb..0b6a8436b 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1435,7 +1435,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 74a84ac89..1f52fba40 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1402,7 +1402,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index eea1516ae..1b63a6c13 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1375,7 +1375,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index dc33d3b8a..94b08d9ce 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1394,7 +1394,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 88fd9554d..ad837fdb5 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1411,7 +1411,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 8408f9ebc..892546e56 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1414,7 +1414,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index d1be0b534..38df585f0 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1346,7 +1346,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 9a281e6cd..70632f86d 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1403,7 +1403,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index b503c330b..4eee748d7 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1400,7 +1400,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 7a9e3bf89..2e73a7645 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 875e473ad..b46fd5495 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1445,7 +1445,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index ec3635753..c142f1f43 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1389,7 +1389,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index c010607e8..19c7fde78 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1441,7 +1441,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index e83a6e26d..3e8aea4f1 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1448,7 +1448,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 0d46bbf96..646a39224 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 662d77ec1..99a8e8f3d 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 574f109e4..e88f3ae8d 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -1405,7 +1405,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index e430546ee..7bbe38819 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1447,7 +1447,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index a12620968..63adbd6ac 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1359,7 +1359,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 134b2ad19..3c07f8d4b 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1436,7 +1436,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 5a307cdb3..1fb32164e 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -597,7 +597,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
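Note: the same one-line change is applied to every generated oc_* module and to its source in src/lib/base.py; it is the "Allow for empty option value" fix from the changelog. A short sketch of why the truthiness test had to become an `is not None` comparison:

# An empty string is falsy, so the old test silently skipped options
# whose value was ''; comparing against None keeps them.
data = {'include': True, 'value': ''}

old = data['include'] and (data['value'] or isinstance(data['value'], int))
new = data['include'] and (data['value'] is not None or isinstance(data['value'], int))

print(bool(old))  # False -> option dropped from the generated CLI options
print(bool(new))  # True  -> option rendered with an explicitly empty value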
diff --git a/roles/openshift_gcp/defaults/main.yml b/roles/openshift_gcp/defaults/main.yml
new file mode 100644
index 000000000..18fc453b2
--- /dev/null
+++ b/roles/openshift_gcp/defaults/main.yml
@@ -0,0 +1,58 @@
+---
+openshift_gcp_prefix: ''
+
+openshift_gcp_create_network: True
+openshift_gcp_create_registry_bucket: True
+openshift_gcp_kubernetes_cluster_status: owned # or shared
+openshift_gcp_node_group_type: master
+
+openshift_gcp_ssh_private_key: ''
+
+openshift_gcp_project: ''
+openshift_gcp_clusterid: default
+openshift_gcp_region: us-central1
+openshift_gcp_zone: us-central1-a
+
+openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network"
+
+openshift_gcp_iam_service_account: ''
+openshift_gcp_iam_service_account_keyfile: ''
+
+openshift_gcp_master_lb_timeout: 2m
+
+openshift_gcp_infra_network_instance_group: ig-i
+
+openshift_gcp_image: 'rhel-7'
+openshift_gcp_base_image: rhel-7
+
+openshift_gcp_registry_bucket_keyfile: ''
+openshift_gcp_registry_bucket_name: "{{ openshift_gcp_prefix }}-docker-registry"
+
+openshift_gcp_node_group_config:
+ - name: master
+ suffix: m
+ tags: ocp-master
+ machine_type: n1-standard-2
+ boot_disk_size: 150
+ scale: 1
+ - name: infra
+ suffix: i
+ tags: ocp-infra-node ocp-node
+ machine_type: n1-standard-2
+ boot_disk_size: 150
+ scale: 1
+ - name: node
+ suffix: n
+ tags: ocp-node
+ machine_type: n1-standard-2
+ boot_disk_size: 150
+ scale: 3
+ - name: node-flex
+ suffix: nf
+ tags: ocp-node
+ machine_type: n1-standard-2
+ boot_disk_size: 150
+ scale: 0
+
+openshift_gcp_startup_script_file: ''
+openshift_gcp_user_data_file: ''
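Note: these defaults feed the provisioning templates below; each entry in openshift_gcp_node_group_config becomes an instance template and a managed instance group whose names are built from openshift_gcp_prefix, the group name, and its suffix (see the loop in provision.j2.sh further down). A sketch of the resulting GCE resource names, assuming an inventory value of 'ocp-' for openshift_gcp_prefix:

prefix = 'ocp-'  # assumption: openshift_gcp_prefix from the inventory
node_groups = [
    {'name': 'master', 'suffix': 'm'},
    {'name': 'infra', 'suffix': 'i'},
    {'name': 'node', 'suffix': 'n'},
    {'name': 'node-flex', 'suffix': 'nf'},
]
for group in node_groups:
    print('{0}instance-template-{1}  {0}ig-{2}'.format(prefix, group['name'], group['suffix']))
# ocp-instance-template-master  ocp-ig-m
# ocp-instance-template-infra   ocp-ig-i
# ...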
diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh
index eacf84b4d..a7475aaf5 100644
--- a/roles/openshift_gcp/templates/dns.j2.sh
+++ b/roles/openshift_gcp/templates/dns.j2.sh
@@ -2,12 +2,12 @@
set -euo pipefail
-dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
-if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
+if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
fi
# Always output the expected nameservers as a comma delimited list
-gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','
+gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index e68e9683f..d72a11de1 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -2,36 +2,38 @@
set -euo pipefail
-# Create SSH key for GCE
-if [ ! -f "{{ gce_ssh_private_key }}" ]; then
- ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N ''
- ssh-add "{{ gce_ssh_private_key }}" || true
-fi
+if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
+ # Create SSH key for GCE
+ if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then
+ ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N ''
+ ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
+ fi
-# Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
-pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub")
-key_tmp_file='/tmp/ocp-gce-keys'
-if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then
- if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then
- gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
+ # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
+ pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
+ key_tmp_file='/tmp/ocp-gce-keys'
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then
+ if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then
+ gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
+ fi
+ echo -n 'cloud-user:' >> "$key_tmp_file"
+ cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file"
+ gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
+ rm -f "$key_tmp_file"
fi
- echo -n 'cloud-user:' >> "$key_tmp_file"
- cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file"
- gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
- rm -f "$key_tmp_file"
fi
metadata=""
-if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then
- if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then
- echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)"
+if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then
+ if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then
+ echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)"
exit 1
fi
- metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}"
+ metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}"
fi
-if [[ -n "{{ provision_gce_user_data_file }}" ]]; then
- if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then
- echo "User data file missing at {{ provision_gce_user_data_file }}"
+if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then
+ if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then
+ echo "User data file missing at {{ openshift_gcp_user_data_file }}"
exit 1
fi
if [[ -n "${metadata}" ]]; then
@@ -39,14 +41,14 @@ if [[ -n "{{ provision_gce_user_data_file }}" ]]; then
else
metadata="--metadata-from-file="
fi
- metadata+="user-data={{ provision_gce_user_data_file }}"
+ metadata+="user-data={{ openshift_gcp_user_data_file }}"
fi
# Select image or image family
-image="{{ provision_gce_registered_image }}"
-if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then
- if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then
- echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue'"
+image="{{ openshift_gcp_image }}"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then
+ echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'"
exit 1
fi
image="family/${image}"
@@ -54,19 +56,19 @@ fi
### PROVISION THE INFRASTRUCTURE ###
-dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers
-if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
exit 1
fi
# Create network
-if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto"
else
- echo "Network '{{ gce_network_name }}' already exists"
+ echo "Network '{{ openshift_gcp_network_name }}' already exists"
fi
# Firewall rules in a form:
@@ -87,56 +89,56 @@ declare -A FW_RULES=(
['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
)
for rule in "${!FW_RULES[@]}"; do
- ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]}
+ ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]}
else
- echo "Firewall rule '{{ provision_prefix }}${rule}' already exists"
+ echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists"
fi ) &
done
# Master IP
-( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global
+( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global
else
- echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists"
+ echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists"
fi ) &
# Internal master IP
-( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}"
+( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}"
else
- echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists"
+ echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists"
fi ) &
# Router IP
-( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}"
+( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}"
else
- echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists"
+ echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists"
fi ) &
-{% for node_group in provision_gce_node_groups %}
+{% for node_group in openshift_gcp_node_group_config %}
# configure {{ node_group.name }}
(
- if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \
- --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \
- --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
+ --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
+ --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ node_group.tags }}" \
--boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
--scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
--image "${image}" ${metadata}
else
- echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists"
+ echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
fi
# Create instance group
- if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \
- --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \
+ --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
else
- echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists"
+ echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists"
fi
) &
{% endfor %}
@@ -147,36 +149,36 @@ for i in `jobs -p`; do wait $i; done
# Configure the master external LB rules
(
# Master health check
-if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
else
- echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists"
+ echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists"
fi
-gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \
- --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}"
+gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \
+ --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}"
# Master backend service
-if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}"
- gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}"
+ gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}"
else
- echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists"
+ echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists"
fi
# Master tcp proxy target
-if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend"
else
- echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists"
+ echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists"
fi
# Master forwarding rule
-if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
- IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
- gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target"
else
- echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists"
+ echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists"
fi
) &
@@ -184,25 +186,25 @@ fi
# Configure the master internal LB rules
(
# Internal master health check
-if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
else
- echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists"
+ echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists"
fi
# Internal master target pool
-if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}"
else
- echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists"
+ echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists"
fi
# Internal master forwarding rule
-if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
- IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
- gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
+ gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool"
else
- echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists"
+ echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists"
fi
) &
@@ -210,25 +212,25 @@ fi
# Configure the infra node rules
(
# Router health check
-if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
else
- echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists"
+ echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists"
fi
# Router target pool
-if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}"
else
- echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists"
+ echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists"
fi
# Router forwarding rule
-if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
- IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
- gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
+ gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool"
else
- echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists"
+ echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists"
fi
) &
@@ -236,11 +238,11 @@ for i in `jobs -p`; do wait $i; done
# set the target pools
(
-if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then
- gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
else
- gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}"
- gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
fi
) &
@@ -252,42 +254,42 @@ while true; do
rm -f $dns
# DNS record for master lb
- if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
- IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
if [[ ! -f $dns ]]; then
- gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
fi
- gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
else
echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
fi
# DNS record for internal master lb
- if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
- IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
if [[ ! -f $dns ]]; then
- gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
fi
- gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
else
echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
fi
# DNS record for router lb
- if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
- IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
if [[ ! -f $dns ]]; then
- gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
fi
- gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
- gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
else
echo "DNS record for '{{ wildcard_zone }}' already exists"
fi
# Commit all DNS changes, retrying if preconditions are not met
if [[ -f $dns ]]; then
- if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
rc=$?
if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
continue
@@ -301,17 +303,17 @@ done
# Create bucket for registry
(
-if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
- gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
+ gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}"
else
- echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists"
+ echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists"
fi
) &
# wait until all node groups are stable
-{% for node_group in provision_gce_node_groups %}
+{% for node_group in openshift_gcp_node_group_config %}
# wait for stable {{ node_group.name }}
-( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) &
+( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) &
{% endfor %}
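The provisioning template above now takes all of its configuration from openshift_gcp_* variables instead of the old gce_*/provision_* names. A minimal inventory sketch using the renamed variables (values are placeholders, not the role's shipped defaults; openshift_gcp_node_group_config, which replaces provision_gce_node_groups, is a structured list and is easier to set in group_vars):

    [OSEv3:vars]
    openshift_gcp_project=my-gcp-project
    openshift_gcp_prefix=mycluster-
    openshift_gcp_region=us-central1
    openshift_gcp_zone=us-central1-a
    openshift_gcp_network_name=mycluster-network
    openshift_gcp_registry_bucket_name=mycluster-registry
    openshift_gcp_infra_network_instance_group=ig-i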
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
index 41ceab2b5..a1e0affec 100644
--- a/roles/openshift_gcp/templates/remove.j2.sh
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -18,8 +18,8 @@ function teardown_cmd() {
if [[ -z "${found}" ]]; then
flag=$((flag+1))
fi
- if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
- gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
+ if gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
fi
}
@@ -33,11 +33,11 @@ function teardown() {
}
# Preemptively spin down the instances
-{% for node_group in provision_gce_node_groups %}
+{% for node_group in openshift_gcp_node_group_config %}
# scale down {{ node_group.name }}
(
# performs a delete and scale down as one operation to ensure maximum parallelism
- if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then
+ if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' ); then
exit 0
fi
instances="${instances%?}"
@@ -45,7 +45,7 @@ function teardown() {
echo "warning: No instances in {{ node_group.name }}" 1>&2
exit 0
fi
- if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed delete-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --instances "${instances}"; then
echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
exit 0
fi
@@ -54,15 +54,15 @@ function teardown() {
# Bucket for registry
(
-if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
- gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
+ gsutil -m rm -r "gs://{{ openshift_gcp_registry_bucket_name }}"
fi
) &
# DNS
(
-dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
-if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
# Retry DNS changes until they succeed since this may be a shared resource
while true; do
dns="${TMPDIR:-/tmp}/dns.yaml"
@@ -70,16 +70,16 @@ if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zon
# export all dns records that match into a zone format, and turn each line into a set of args for
# record-sets transaction.
- gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}"
+ gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}"
if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
then
rm -f "${dns}"
- gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
- cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ cat "${dns}.input" | xargs -L1 gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
# Commit all DNS changes, retrying if preconditions are not met
- if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
rc=$?
if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
continue
@@ -95,25 +95,25 @@ fi
(
# Router network rules
-teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
-teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
-teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks
-teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+teardown "{{ openshift_gcp_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}router-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}router-network-lb-health-check" compute http-health-checks
+teardown "{{ openshift_gcp_prefix }}router-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
# Internal master network rules
-teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
-teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
-teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks
-teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+teardown "{{ openshift_gcp_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}master-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}master-network-lb-health-check" compute http-health-checks
+teardown "{{ openshift_gcp_prefix }}master-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
) &
(
# Master SSL network rules
-teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
-teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
-teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
-teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
-teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-target" compute target-tcp-proxies
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-backend" compute backend-services --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" compute health-checks
) &
#Firewall rules
@@ -130,10 +130,10 @@ declare -A FW_RULES=(
['infra-node-external']=""
)
for rule in "${!FW_RULES[@]}"; do
- ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ ( if gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
# retry a few times because this call can be flaky
for i in `seq 1 3`; do
- if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
+ if gcloud -q --project "{{ openshift_gcp_project }}" compute firewall-rules delete "{{ openshift_gcp_prefix }}$rule"; then
break
fi
done
@@ -142,15 +142,15 @@ done
for i in `jobs -p`; do wait $i; done
-{% for node_group in provision_gce_node_groups %}
+{% for node_group in openshift_gcp_node_group_config %}
# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
(
- teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
- teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+ teardown "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ openshift_gcp_zone }}"
+ teardown "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
) &
{% endfor %}
for i in `jobs -p`; do wait $i; done
# Network
-teardown "{{ gce_network_name }}" compute networks
+teardown "{{ openshift_gcp_network_name }}" compute networks
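Only the project and prefix variables change in the calls above; the teardown helper still works the same way, running a describe first and deleting the resource only if it exists. Roughly, a call such as

    teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global

expands (per teardown_cmd above) into

    gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global
    gcloud --project "{{ openshift_gcp_project }}" compute addresses delete -q "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global

with the delete only attempted when the describe succeeds.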
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 93a5973d4..fa07c1dde 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,5 +1,6 @@
"""Check that required Docker images are available."""
+from ansible.module_utils import six
from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import DockerHostMixin
@@ -153,7 +154,15 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def known_docker_registries(self):
"""Build a list of docker registries available according to inventory vars."""
- regs = list(self.get_var("openshift_docker_additional_registries", default=[]))
+ regs = self.get_var("openshift_docker_additional_registries", default=[])
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1497274
+ # if the result was a string type, place it into a list. We must do this
+ # as using list() on a string will split the string into its characters.
+ if isinstance(regs, six.string_types):
+ regs = [regs]
+ else:
+ # Otherwise cast to a list as was done previously
+ regs = list(regs)
deployment_type = self.get_var("openshift_deployment_type")
if deployment_type == "origin" and "docker.io" not in regs:
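The isinstance guard matters because list() over a string iterates its characters instead of wrapping the value, so a single registry supplied as a plain string would otherwise be shredded. A quick illustration in plain Python (not part of the check itself):

    >>> list("docker.io")
    ['d', 'o', 'c', 'k', 'e', 'r', '.', 'i', 'o']
    >>> ["docker.io"]
    ['docker.io']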
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 5574a1446..0f1f659c6 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -8,7 +8,6 @@ openshift_logging_labels: {}
openshift_logging_label_key: ""
openshift_logging_label_value: ""
openshift_logging_install_logging: False
-openshift_logging_uninstall_logging: False
openshift_logging_purge_logging: False
openshift_logging_image_pull_secret: ""
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 0da9771c7..15f6a23e6 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -36,7 +36,7 @@
- include: delete_logging.yaml
when:
- - openshift_logging_uninstall_logging | default(false) | bool
+ - not openshift_logging_install_logging | default(false) | bool
- name: Cleaning up local temp dir
local_action: file path="{{local_tmp.stdout}}" state=absent
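With the separate uninstall flag gone, delete_logging.yaml now runs whenever the install flag does not evaluate to true. An inventory that previously set openshift_logging_uninstall_logging=true would instead disable the install flag, e.g.:

    [OSEv3:vars]
    openshift_logging_install_logging=false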
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 084b734ee..8da74430f 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -1,7 +1,6 @@
---
openshift_metrics_start_cluster: True
openshift_metrics_install_metrics: False
-openshift_metrics_uninstall_metrics: False
openshift_metrics_startup_timeout: 500
openshift_metrics_hawkular_replicas: 1
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index c92458c50..10509fc1e 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -49,7 +49,7 @@
- include: uninstall_metrics.yaml
when:
- - openshift_metrics_uninstall_metrics | bool
+ - not openshift_metrics_install_metrics | bool
- include: uninstall_hosa.yaml
when: not openshift_metrics_install_hawkular_agent | bool
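The metrics role follows the same pattern: uninstall_metrics.yaml is now included whenever the install flag is not true, e.g.:

    [OSEv3:vars]
    openshift_metrics_install_metrics=false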
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 2759188f3..e3898b520 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -46,6 +46,22 @@
notify:
- restart node
+- name: Configure AWS Cloud Provider Settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^AWS_ACCESS_KEY_ID='
+ line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
+ - regex: '^AWS_SECRET_ACCESS_KEY='
+ line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
+ no_log: True
+ when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
+ notify:
+ - restart node
+
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e82fb42b8..59b8bb76e 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -76,22 +76,6 @@
include: config.yml
when: not openshift_node_bootstrap
-- name: Configure AWS Cloud Provider Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^AWS_ACCESS_KEY_ID='
- line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
- - regex: '^AWS_SECRET_ACCESS_KEY='
- line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
- no_log: True
- when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
- notify:
- - restart node
-
#### Storage class plugins here ####
- name: NFS storage plugin configuration
include: storage_plugins/nfs.yml
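The AWS credential task is unchanged in content; it has simply moved from main.yml into config.yml, so it now runs only on the non-bootstrap path guarded by when: not openshift_node_bootstrap. When it does run, it keeps lines like the following in the node sysconfig file (illustrative values; the file name depends on openshift.common.service_type, e.g. origin-node or atomic-openshift-node):

    # /etc/sysconfig/origin-node
    AWS_ACCESS_KEY_ID=AKIAEXAMPLEKEY
    AWS_SECRET_ACCESS_KEY=exampleSecretKey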
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 54a6dd7c3..074904bec 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,6 +1,6 @@
---
- name: Create heketi DB volume
- command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image }}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json"
register: setup_storage
- name: Copy heketi-storage list
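Passing --image pins the container image used for the heketi DB volume setup instead of relying on the client's default. Assuming glusterfs_heketi_client resolves to a plain heketi-cli invocation, the rendered command looks roughly like this (image name and tag are placeholders):

    heketi-cli setup-openshift-heketi-storage --image heketi/heketi:latest --listfile /tmp/heketi-storage.json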