136 files changed, 1306 insertions(+), 742 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 1541b6a5f..9dcd067e5 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.7.0-0.138.0 ./ +3.7.0-0.143.0 ./ diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index f0f250480..83a05370a 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -707,11 +707,6 @@ def oo_openshift_env(hostvars): if regex.match(key): facts[key] = hostvars[key] - migrations = {'openshift_router_selector': 'openshift_hosted_router_selector', - 'openshift_registry_selector': 'openshift_hosted_registry_selector'} - for old_fact, new_fact in migrations.items(): - if old_fact in facts and new_fact not in facts: - facts[new_fact] = facts[old_fact] return facts diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 4a0630a69..c8c60bb60 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -425,6 +425,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # path using these options would be "/exports/registry" #openshift_hosted_registry_storage_kind=nfs #openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character #openshift_hosted_registry_storage_nfs_directory=/exports #openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' #openshift_hosted_registry_storage_volume_name=registry @@ -437,6 +439,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_kind=nfs #openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] #openshift_hosted_registry_storage_host=nfs.example.com +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character #openshift_hosted_registry_storage_nfs_directory=/exports #openshift_hosted_registry_storage_volume_name=registry #openshift_hosted_registry_storage_volume_size=10Gi @@ -535,7 +539,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics # Currently, you may only alter the hostname portion of the url, alterting the # `/hawkular/metrics` path will break installation of metrics. 
-#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com # Configure the prefix and version for the component images #openshift_metrics_image_prefix=docker.io/openshift/origin- #openshift_metrics_image_version=v3.7.0 diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 03fbcc63c..30987fa38 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -432,6 +432,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # path using these options would be "/exports/registry" #openshift_hosted_registry_storage_kind=nfs #openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character #openshift_hosted_registry_storage_nfs_directory=/exports #openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' #openshift_hosted_registry_storage_volume_name=registry @@ -444,6 +446,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_kind=nfs #openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] #openshift_hosted_registry_storage_host=nfs.example.com +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character #openshift_hosted_registry_storage_nfs_directory=/exports #openshift_hosted_registry_storage_volume_name=registry #openshift_hosted_registry_storage_volume_size=10Gi @@ -543,7 +547,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics # Currently, you may only alter the hostname portion of the url, alterting the # `/hawkular/metrics` path will break installation of metrics. -#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com # Configure the prefix and version for the component images #openshift_metrics_image_prefix=registry.example.com:8888/openshift3/ #openshift_metrics_image_version=3.7.0 diff --git a/openshift-ansible.spec b/openshift-ansible.spec index b2db44b86..c3a477bf6 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.7.0 -Release: 0.138.0%{?dist} +Release: 0.143.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -276,6 +276,39 @@ Atomic OpenShift Utilities includes %changelog +* Wed Oct 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.143.0 +- Limit base-package install during master upgrades (mgugino@redhat.com) +- Fix provisiong scale group and elb logic (mgugino@redhat.com) + +* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.142.0 +- Document that nfs_directory must conform to DNS-1123 (sdodson@redhat.com) +- Move node aws credentials to config.yml (mgugino@redhat.com) +- Use etcd_ip when communicating with the cluster as a peer in etcd scaleup. + (abutcher@redhat.com) +- Ensure openshift.common.portal_net updated during scaleup. 
+ (abutcher@redhat.com) +- docker: fix some tox warnings (gscrivan@redhat.com) +- Require openshift_image_tag in the inventory with openshift-enterprise + (gscrivan@redhat.com) +- crio: use the image_tag on RHEL (gscrivan@redhat.com) +- docker: use the image_tag on RHEL (gscrivan@redhat.com) + +* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.141.0 +- Restore registires to /etc/sysconfig/docker (mgugino@redhat.com) +- Fix Prometheus byo entry point (rteague@redhat.com) +- Update to the openshift_aws style scheme for variables (ccoleman@redhat.com) + +* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.140.0 +- openshift_checks: Fix incorrect list cast (smilner@redhat.com) +- lib/base: Allow for empty option value (jarrpa@redhat.com) + +* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.139.0 +- Fix mispelling in error message yammlint -> yamllint (simo@redhat.com) +- Separate certificate playbooks. (abutcher@redhat.com) +- Reverting using uninstall variables for logging and metrics + (ewolinet@redhat.com) +- Add --image flag to setup-openshift-heketi-storage (ttindell@isenpai.com) + * Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.138.0 - Fix typo in openshift_default_storage_class/README (hansmi@vshn.ch) - GlusterFS: make ServiceAccounts privileged when either glusterfs or heketi is diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 2b3d4329e..816cb35b4 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -87,11 +87,6 @@ masters nodes etcd -[OSEv3:children] -masters -nodes -etcd - [OSEv3:vars] ################################################################################ # Ensure these variables are set for bootstrap diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml index 1e54f0467..1ab1e8041 100644 --- a/playbooks/aws/openshift-cluster/build_ami.yml +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -17,35 +17,17 @@ - name: openshift_aws_region msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - - name: create an instance and prepare for ami - include_role: - name: openshift_aws - tasks_from: build_ami.yml - vars: - openshift_aws_node_group_type: compute +- include: provision_vpc.yml - - name: fetch newly created instances - ec2_remote_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - filters: - "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}" - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: instancesout.instances|length > 0 +- include: provision_ssh_keypair.yml - - name: wait for ssh to become available - wait_for: - port: 22 - host: "{{ instancesout.instances[0].public_ip_address }}" - timeout: 300 - search_regex: OpenSSH +- include: provision_sec_group.yml + vars: + openshift_aws_node_group_type: compute - - name: add host to nodes - add_host: - groups: nodes - name: "{{ instancesout.instances[0].public_dns_name }}" +- include: provision_instance.yml + vars: + openshift_aws_node_group_type: compute - hosts: nodes gather_facts: False @@ -54,34 +36,10 @@ set_fact: ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}" -- name: normalize groups - include: ../../byo/openshift-cluster/initialize_groups.yml +# This is the part that installs all of the software and configs for the instance +# to become a node. 
+- include: ../../common/openshift-node/image_prep.yml -- name: run the std_include - include: ../../common/openshift-cluster/evaluate_groups.yml - -- name: run the std_include - include: ../../common/openshift-cluster/initialize_facts.yml - -- name: run the std_include - include: ../../common/openshift-cluster/initialize_openshift_repos.yml - -- name: run node config setup - include: ../../common/openshift-node/setup.yml - -- name: run node config - include: ../../common/openshift-node/configure_nodes.yml - -- name: Re-enable excluders - include: ../../common/openshift-node/enable_excluders.yml - -- hosts: localhost - connection: local - become: no - tasks: - - name: seal the ami - include_role: - name: openshift_aws - tasks_from: seal_ami.yml - vars: - openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" +- include: seal_ami.yml + vars: + openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml index 86d58a68e..4d0bf9531 100644 --- a/playbooks/aws/openshift-cluster/install.yml +++ b/playbooks/aws/openshift-cluster/install.yml @@ -1,68 +1,19 @@ --- -- name: Setup the vpc and the master node group +- name: Setup the master node group hosts: localhost tasks: - - name: Alert user to variables needed - clusterid - debug: - msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" - - - name: Alert user to variables needed - region - debug: - msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - - - name: fetch newly created instances - ec2_remote_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - filters: - "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" - "tag:host-type": master - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: instancesout.instances|length > 0 - - - name: add new master to masters group - add_host: - groups: "masters,etcd,nodes" - name: "{{ item.public_ip_address }}" - hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}" - with_items: "{{ instancesout.instances }}" - - - name: wait for ssh to become available - wait_for: - port: 22 - host: "{{ item.public_ip_address }}" - timeout: 300 - search_regex: OpenSSH - with_items: "{{ instancesout.instances }}" + - include_role: + name: openshift_aws + tasks_from: setup_master_group.yml - name: set the master facts for hostname to elb hosts: masters gather_facts: no remote_user: root tasks: - - name: fetch elbs - ec2_elb_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - names: - - "{{ item }}" - with_items: - - "{{ openshift_aws_clusterid | default('default') }}-master-external" - - "{{ openshift_aws_clusterid | default('default') }}-master-internal" - delegate_to: localhost - register: elbs - - - debug: var=elbs - - - name: set fact - set_fact: - openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" - osm_custom_cors_origins: - - "{{ elbs.results[1].elbs[0].dns_name }}" - - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" - - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" - with_items: "{{ groups['masters'] }}" + - include_role: + name: openshift_aws + tasks_from: master_facts.yml - name: normalize groups include: ../../byo/openshift-cluster/initialize_groups.yml diff --git a/playbooks/aws/openshift-cluster/provision.yml 
b/playbooks/aws/openshift-cluster/provision.yml index 8f018abd0..4b5bd22ea 100644 --- a/playbooks/aws/openshift-cluster/provision.yml +++ b/playbooks/aws/openshift-cluster/provision.yml @@ -1,5 +1,5 @@ --- -- name: Setup the vpc and the master node group +- name: Setup the elb and the master node group hosts: localhost tasks: diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml new file mode 100644 index 000000000..6e843453c --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_instance.yml @@ -0,0 +1,12 @@ +--- +# If running this play directly, be sure the variable +# 'openshift_aws_node_group_type' is set correctly for your usage. +# See build_ami.yml for an example. +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: create an instance and prepare for ami + include_role: + name: openshift_aws + tasks_from: provision_instance.yml diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml new file mode 100644 index 000000000..039357adb --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml @@ -0,0 +1,13 @@ +--- +# If running this play directly, be sure the variable +# 'openshift_aws_node_group_type' is set correctly for your usage. +# See build_ami.yml for an example. +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: create an instance and prepare for ami + include_role: + name: openshift_aws + tasks_from: security_group.yml + when: openshift_aws_create_security_groups | default(True) | bool diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml new file mode 100644 index 000000000..3ec683958 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml @@ -0,0 +1,12 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: create an instance and prepare for ami + include_role: + name: openshift_aws + tasks_from: ssh_keys.yml + vars: + openshift_aws_node_group_type: compute + when: openshift_aws_users | default([]) | length > 0 diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml new file mode 100644 index 000000000..0a23a6d32 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_vpc.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: create a vpc + include_role: + name: openshift_aws + tasks_from: vpc.yml + when: openshift_aws_create_vpc | default(True) | bool diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml new file mode 100644 index 000000000..8239a64fb --- /dev/null +++ b/playbooks/aws/openshift-cluster/seal_ami.yml @@ -0,0 +1,12 @@ +--- +# If running this play directly, be sure the variable +# 'openshift_aws_ami_name' is set correctly for your usage. +# See build_ami.yml for an example. 
+- hosts: localhost + connection: local + become: no + tasks: + - name: seal the ami + include_role: + name: openshift_aws + tasks_from: seal_ami.yml diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml index 15917078d..4d3f7f42c 100644 --- a/playbooks/byo/openshift-cluster/openshift-prometheus.yml +++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml @@ -1,4 +1,6 @@ --- - include: initialize_groups.yml +- include: ../../common/openshift-cluster/std_include.yml + - include: ../../common/openshift-cluster/openshift_prometheus.yml diff --git a/playbooks/byo/openshift-master/certificates.yml b/playbooks/byo/openshift-master/certificates.yml index 26b964034..e147dcba1 100644 --- a/playbooks/byo/openshift-master/certificates.yml +++ b/playbooks/byo/openshift-master/certificates.yml @@ -3,6 +3,4 @@ - include: ../../common/openshift-cluster/std_include.yml -- include: ../../common/openshift-master/ca.yml - - include: ../../common/openshift-master/certificates.yml diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 9f992cca6..e0c36fb69 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -16,6 +16,4 @@ - include: ../../common/openshift-cluster/std_include.yml -- include: ../../common/openshift-node/certificates.yml - - include: ../../common/openshift-node/config.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 4ca0d48e4..96a43230d 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -18,10 +18,6 @@ - docker_image_availability - docker_storage -- include: ../openshift-etcd/ca.yml - -- include: ../openshift-etcd/certificates.yml - - include: ../openshift-etcd/config.yml - include: ../openshift-nfs/config.yml @@ -30,16 +26,10 @@ - include: ../openshift-loadbalancer/config.yml when: groups.oo_lb_to_config | default([]) | count > 0 -- include: ../openshift-master/ca.yml - -- include: ../openshift-master/certificates.yml - - include: ../openshift-master/config.yml - include: ../openshift-master/additional_config.yml -- include: ../openshift-node/certificates.yml - - include: ../openshift-node/config.yml - include: ../openshift-glusterfs/config.yml diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 1b186f181..e6400ea61 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,6 +1,9 @@ --- +# openshift_install_base_package_group may be set in a play variable to limit +# the host groups the base package is installed on. This is currently used +# for master/control-plane upgrades. - name: Set version_install_base_package true on masters and nodes - hosts: oo_masters_to_config:oo_nodes_to_config + hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}" tasks: - name: Set version_install_base_package true set_fact: @@ -16,8 +19,8 @@ # NOTE: We set this even on etcd hosts as they may also later run as masters, # and we don't want to install wrong version of docker and have to downgrade # later. 
-- name: Set openshift_version for all hosts - hosts: oo_all_hosts:!oo_first_master +- name: Set openshift_version for etcd, node, and master hosts + hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master vars: openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" pre_tasks: diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index b9eb380d3..32e5e708a 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -23,6 +23,7 @@ - include: cockpit-ui.yml - include: openshift_prometheus.yml + when: openshift_hosted_prometheus_deploy | default(False) | bool - name: Hosted Install Checkpoint End hosts: localhost diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml index ed89d3bde..ac2d250a3 100644 --- a/playbooks/common/openshift-cluster/openshift_prometheus.yml +++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml @@ -3,4 +3,3 @@ hosts: oo_first_master roles: - role: openshift_prometheus - when: openshift_hosted_prometheus_deploy | default(False) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 12cd209d2..e4193a00e 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -114,12 +114,18 @@ register: g_new_openshift_ca_mktemp changed_when: false -- include: ../../openshift-master/ca.yml +- name: Create OpenShift CA + hosts: oo_first_master vars: # Set openshift_ca_config_dir to a temporary directory where CA # will be created. We'll replace the existing CA with the CA # created in the temporary directory. 
openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}" + roles: + - role: openshift_master_facts + - role: openshift_named_certificates + - role: openshift_ca + openshift_ca_host: "{{ groups.oo_first_master.0 }}" - name: Create temp directory for syncing certs hosts: localhost diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index 54c85f0fb..f64f0e003 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -68,6 +68,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index d7cb38d03..43da5b629 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -68,6 +68,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 6cdea7b84..e9cec9220 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -72,6 +72,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 8ab68002d..27d8515dc 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -76,6 +76,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 82faf743e..6c4f9671b 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -80,6 +80,7 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False + openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 2cae231b4..82539dac8 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -10,6 +10,10 @@ installer_phase_etcd: "In Progress" aggregate: false +- include: ca.yml + +- include: certificates.yml + - name: Configure etcd hosts: oo_etcd_to_config any_errors_fatal: true diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml index 8aa508119..b5ba2bbba 100644 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -20,7 +20,7 @@ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} - -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} + -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_ip }}:{{ etcd_client_port }} member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }} delegate_to: "{{ etcd_ca_host }}" failed_when: diff --git a/playbooks/common/openshift-master/ca.yml b/playbooks/common/openshift-master/ca.yml deleted file mode 100644 index 5bb796fa3..000000000 --- a/playbooks/common/openshift-master/ca.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Create OpenShift CA - hosts: oo_masters_to_config - roles: - - role: openshift_master_facts - - role: openshift_named_certificates - - role: openshift_ca - openshift_ca_host: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 766e0e501..bc1fee982 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -10,6 +10,8 @@ installer_phase_master: "In Progress" aggregate: false +- include: certificates.yml + - name: Disable excluders hosts: oo_masters_to_config gather_facts: no diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index d007fac85..f4dc9df8a 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -47,8 +47,6 @@ - include: ../openshift-etcd/certificates.yml -- include: ../openshift-master/certificates.yml - - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml index 2ad805858..9a6cf26fc 100644 --- a/playbooks/common/openshift-master/set_network_facts.yml +++ b/playbooks/common/openshift-master/set_network_facts.yml @@ -13,7 +13,9 @@ - name: Set network facts for masters hosts: oo_masters_to_config gather_facts: no - tasks: + roles: + - role: openshift_facts + post_tasks: - block: - set_fact: osm_cluster_network_cidr: "{{ 
(hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}" @@ -24,5 +26,9 @@ - set_fact: openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}" when: openshift_portal_net is not defined + - openshift_facts: + role: common + local_facts: + portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" when: - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 6fd8aa6f1..700aab48c 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -10,6 +10,8 @@ installer_phase_node: "In Progress" aggregate: false +- include: certificates.yml + - include: setup.yml - include: containerized_nodes.yml diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml new file mode 100644 index 000000000..fc06621ee --- /dev/null +++ b/playbooks/common/openshift-node/image_prep.yml @@ -0,0 +1,21 @@ +--- +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../openshift-cluster/evaluate_groups.yml + +- name: run the std_include + include: ../openshift-cluster/initialize_facts.yml + +- name: run the std_include + include: ../openshift-cluster/initialize_openshift_repos.yml + +- name: run node config setup + include: setup.yml + +- name: run node config + include: configure_nodes.yml + +- name: Re-enable excluders + include: enable_excluders.yml diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml index 9eb9db316..fa982d533 100644 --- a/roles/ansible_service_broker/defaults/main.yml +++ b/roles/ansible_service_broker/defaults/main.yml @@ -6,6 +6,14 @@ ansible_service_broker_log_level: info ansible_service_broker_output_request: false ansible_service_broker_recovery: true ansible_service_broker_bootstrap_on_startup: true -# Recommended you do not enable this for now ansible_service_broker_dev_broker: false +ansible_service_broker_refresh_interval: 600s +# Recommended you do not enable this for now ansible_service_broker_launch_apb_on_bind: false + +ansible_service_broker_image_pull_policy: IfNotPresent +ansible_service_broker_sandbox_role: edit +ansible_service_broker_auto_escalate: true +ansible_service_broker_registry_tag: latest +ansible_service_broker_registry_whitelist: + - '.*-apb$' diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index b3797ef96..0f4b71124 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -17,16 +17,24 @@ ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}" ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}" + ansible_service_broker_registry_name: "{{ ansible_service_broker_registry_name | default(__ansible_service_broker_registry_name) }}" ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}" ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | 
default(__ansible_service_broker_registry_user) }}" ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}" ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}" + ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/service-catalog" + - name: set ansible-service-broker image facts using set prefix and tag set_fact: ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}" ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}" +- slurp: + src: "{{ ansible_service_broker_certs_dir }}/ca.crt" + register: catalog_ca + + - include: validate_facts.yml @@ -42,53 +50,119 @@ namespace: openshift-ansible-service-broker state: present -- name: Set SA cluster-role +- name: create ansible-service-broker client serviceaccount + oc_serviceaccount: + name: asb-client + namespace: openshift-ansible-service-broker + state: present + +- name: Create asb-auth cluster role + oc_clusterrole: + state: present + name: asb-auth + rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["create", "delete"] + - apiGroups: ["authorization.openshift.io"] + resources: ["subjectrulesreview"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + +- name: Create asb-access cluster role + oc_clusterrole: + state: present + name: asb-access + rules: + - nonResourceURLs: ["/ansible-service-broker", "ansible-service-broker/*"] + verbs: ["get", "post", "put", "patch", "delete"] + +- name: Bind admin cluster-role to asb serviceaccount oc_adm_policy_user: state: present - namespace: "openshift-ansible-service-broker" + namespace: openshift-ansible-service-broker resource_kind: cluster-role resource_name: admin user: "system:serviceaccount:openshift-ansible-service-broker:asb" -- name: create ansible-service-broker service - oc_service: - name: asb +- name: Bind auth cluster role to asb service account + oc_adm_policy_user: + state: present namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-auth + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Bind asb-access role to asb-client service account + oc_adm_policy_user: state: present - labels: - app: openshift-ansible-service-broker - service: asb - ports: - - name: port-1338 - port: 1338 - selector: - app: openshift-ansible-service-broker - service: asb + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-access + user: "system:serviceaccount:openshift-ansible-service-broker:asb-client" -- name: create etcd service - oc_service: - name: etcd +- name: create asb-client token secret + oc_obj: + name: asb-client + state: present + kind: Secret + content: + path: /tmp/asbclientsecretout + data: + apiVersion: v1 + kind: Secret + metadata: + name: asb-client + annotations: + kubernetes.io/service-account.name: asb-client + type: kubernetes.io/service-account-token + +# Using oc_obj because oc_service doesn't seem to allow annotations +# TODO: Extend oc_service to allow annotations +- name: create ansible-service-broker service + oc_obj: + name: asb 
namespace: openshift-ansible-service-broker state: present - ports: - - name: etcd-advertise - port: 2379 - selector: - app: openshift-ansible-service-broker - service: etcd + kind: Service + content: + path: /tmp/asbsvcout + data: + apiVersion: v1 + kind: Service + metadata: + name: asb + labels: + app: openshift-ansible-service-broker + service: asb + annotations: + service.alpha.openshift.io/serving-cert-secret-name: asb-tls + spec: + ports: + - name: port-1338 + port: 1338 + targetPort: 1338 + protocol: TCP + selector: + app: openshift-ansible-service-broker + service: asb - name: create route for ansible-service-broker service oc_route: name: asb-1338 namespace: openshift-ansible-service-broker state: present + labels: + app: openshift-ansible-service-broker + service: asb service_name: asb port: 1338 - register: asb_route_out - -- name: get ansible-service-broker route name - set_fact: - ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}" + tls_termination: Reencrypt - name: create persistent volume claim for etcd oc_obj: @@ -97,7 +171,7 @@ state: present kind: PersistentVolumeClaim content: - path: /tmp/dcout + path: /tmp/pvcout data: apiVersion: v1 kind: PersistentVolumeClaim @@ -111,50 +185,61 @@ requests: storage: 1Gi -- name: create etcd deployment +- name: Create Ansible Service Broker deployment config oc_obj: - name: etcd + name: asb namespace: openshift-ansible-service-broker state: present - kind: Deployment + kind: DeploymentConfig content: path: /tmp/dcout data: - apiVersion: extensions/v1beta1 - kind: Deployment + apiVersion: v1 + kind: DeploymentConfig metadata: - name: etcd - namespace: openshift-ansible-service-broker + name: asb labels: app: openshift-ansible-service-broker - service: etcd + service: asb spec: + replicas: 1 selector: - matchLabels: - app: openshift-ansible-service-broker - service: etcd + app: openshift-ansible-service-broker strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - replicas: 1 + type: Rolling template: metadata: labels: app: openshift-ansible-service-broker - service: etcd + service: asb spec: - restartPolicy: Always + serviceAccount: asb containers: + - image: "{{ ansible_service_broker_image }}" + name: asb + imagePullPolicy: IfNotPresent + volumeMounts: + - name: config-volume + mountPath: /etc/ansible-service-broker + - name: asb-tls + mountPath: /etc/tls/private + ports: + - containerPort: 1338 + protocol: TCP + env: + - name: BROKER_CONFIG + value: /etc/ansible-service-broker/config.yaml + resources: {} + terminationMessagePath: /tmp/termination-log + - image: "{{ ansible_service_broker_etcd_image }}" name: etcd imagePullPolicy: IfNotPresent terminationMessagePath: /tmp/termination-log workingDir: /etcd args: - - '{{ ansible_service_broker_etcd_image_etcd_path }}' - - --data-dir=/data + - "{{ ansible_service_broker_etcd_image_etcd_path }}" + - "--data-dir=/data" - "--listen-client-urls=http://0.0.0.0:2379" - "--advertise-client-urls=http://0.0.0.0:2379" ports: @@ -170,57 +255,15 @@ - name: etcd persistentVolumeClaim: claimName: etcd - -- name: create ansible-service-broker deployment - oc_obj: - name: asb - namespace: openshift-ansible-service-broker - state: present - kind: Deployment - content: - path: /tmp/dcout - data: - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: asb - namespace: openshift-ansible-service-broker - labels: - app: openshift-ansible-service-broker - service: asb - spec: - strategy: - type: Recreate - replicas: 1 - template: - 
metadata: - labels: - app: openshift-ansible-service-broker - service: asb - spec: - serviceAccount: asb - restartPolicy: Always - containers: - - image: "{{ ansible_service_broker_image }}" - name: asb - imagePullPolicy: IfNotPresent - volumeMounts: - - name: config-volume - mountPath: /etc/ansible-service-broker - ports: - - containerPort: 1338 - protocol: TCP - env: - - name: BROKER_CONFIG - value: /etc/ansible-service-broker/config.yaml - terminationMessagePath: /tmp/termination-log - volumes: - name: config-volume configMap: name: broker-config items: - key: broker-config path: config.yaml + - name: asb-tls + secret: + secretName: asb-tls # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: @@ -239,42 +282,65 @@ name: broker-config namespace: openshift-ansible-service-broker labels: - app: ansible-service-broker + app: openshift-ansible-service-broker data: broker-config: | registry: - name: "{{ ansible_service_broker_registry_type }}" - url: "{{ ansible_service_broker_registry_url }}" - user: "{{ ansible_service_broker_registry_user }}" - pass: "{{ ansible_service_broker_registry_password }}" - org: "{{ ansible_service_broker_registry_organization }}" + - type: {{ ansible_service_broker_registry_type }} + name: {{ ansible_service_broker_registry_name }} + url: {{ ansible_service_broker_registry_url }} + user: {{ ansible_service_broker_registry_user }} + pass: {{ ansible_service_broker_registry_password }} + org: {{ ansible_service_broker_registry_organization }} + tag: {{ ansible_service_broker_registry_tag }} + white_list: {{ ansible_service_broker_registry_whitelist }} dao: - etcd_host: etcd + etcd_host: 0.0.0.0 etcd_port: 2379 log: logfile: /var/log/ansible-service-broker/asb.log stdout: true - level: "{{ ansible_service_broker_log_level }}" + level: {{ ansible_service_broker_log_level }} color: true - openshift: {} + openshift: + host: "" + ca_file: "" + bearer_token_file: "" + sandbox_role: {{ ansible_service_broker_sandbox_role }} + image_pull_policy: {{ ansible_service_broker_image_pull_policy }} broker: dev_broker: {{ ansible_service_broker_dev_broker | bool | lower }} + bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }} + refresh_interval: {{ ansible_service_broker_refresh_interval }} launch_apb_on_bind: {{ ansible_service_broker_launch_apb_on_bind | bool | lower }} - recovery: {{ ansible_service_broker_recovery | bool | lower }} output_request: {{ ansible_service_broker_output_request | bool | lower }} - bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }} + recovery: {{ ansible_service_broker_recovery | bool | lower }} + ssl_cert_key: /etc/tls/private/tls.key + ssl_cert: /etc/tls/private/tls.crt + auto_escalate: {{ ansible_service_broker_auto_escalate }} + auth: + - type: basic + enabled: false + - name: Create the Broker resource in the catalog oc_obj: name: ansible-service-broker state: present - kind: Broker + kind: ServiceBroker content: path: /tmp/brokerout data: apiVersion: servicecatalog.k8s.io/v1alpha1 - kind: Broker + kind: ServiceBroker metadata: name: ansible-service-broker spec: - url: http://asb.openshift-ansible-service-broker.svc:1338 + url: http://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker + authInfo: + bearer: + secretRef: + name: asb-client + namespace: openshift-ansible-service-broker + kind: Secret + caBundle: "{{ catalog_ca.content }}" diff --git a/roles/ansible_service_broker/tasks/remove.yml 
b/roles/ansible_service_broker/tasks/remove.yml index 2519f9f4c..f0a6be226 100644 --- a/roles/ansible_service_broker/tasks/remove.yml +++ b/roles/ansible_service_broker/tasks/remove.yml @@ -1,16 +1,57 @@ --- -- name: remove openshift-ansible-service-broker project - oc_project: - name: openshift-ansible-service-broker - state: absent - - name: remove ansible-service-broker serviceaccount oc_serviceaccount: name: asb namespace: openshift-ansible-service-broker state: absent +- name: remove ansible-service-broker client serviceaccount + oc_serviceaccount: + name: asb-client + namespace: openshift-ansible-service-broker + state: absent + +- name: remove asb-auth cluster role + oc_clusterrole: + state: absent + name: asb-auth + +- name: remove asb-access cluster role + oc_clusterrole: + state: absent + name: asb-access + +- name: Unbind admin cluster-role to asb serviceaccount + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: admin + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Unbind auth cluster role to asb service account + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-auth + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Unbind asb-access role to asb-client service account + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-access + user: "system:serviceaccount:openshift-ansible-service-broker:asb-client" + +- name: remove asb-client token secret + oc_secret: + state: absent + name: asb-client + namespace: openshift-ansible-service-broker + - name: remove ansible-service-broker service oc_service: name: asb @@ -35,19 +76,19 @@ namespace: openshift-ansible-service-broker state: absent -- name: remove etcd deployment +- name: remove Ansible Service Broker deployment config oc_obj: - name: etcd + name: asb namespace: openshift-ansible-service-broker + kind: DeploymentConfig state: absent - kind: Deployment -- name: remove ansible-service-broker deployment +- name: remove secret for broker auth oc_obj: - name: asb + name: asb-auth-secret namespace: openshift-ansible-service-broker + kind: Broker state: absent - kind: Deployment # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: - name: remove config map for ansible-service-broker @@ -62,4 +103,9 @@ oc_obj: name: ansible-service-broker state: absent - kind: Broker + kind: ServiceBroker + +- name: remove openshift-ansible-service-broker project + oc_project: + name: openshift-ansible-service-broker + state: absent diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml index 15e448515..3e9639adf 100644 --- a/roles/ansible_service_broker/vars/default_images.yml +++ b/roles/ansible_service_broker/vars/default_images.yml @@ -8,6 +8,7 @@ __ansible_service_broker_etcd_image_tag: latest __ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd __ansible_service_broker_registry_type: dockerhub +__ansible_service_broker_registry_name: dh __ansible_service_broker_registry_url: null __ansible_service_broker_registry_user: null __ansible_service_broker_registry_password: null diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml index 
ce2ae8365..9c576cb76 100644 --- a/roles/ansible_service_broker/vars/openshift-enterprise.yml +++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml @@ -7,7 +7,9 @@ __ansible_service_broker_etcd_image_prefix: rhel7/ __ansible_service_broker_etcd_image_tag: latest __ansible_service_broker_etcd_image_etcd_path: /bin/etcd + __ansible_service_broker_registry_type: rhcc +__ansible_service_broker_registry_name: rh __ansible_service_broker_registry_url: "https://registry.access.redhat.com" __ansible_service_broker_registry_user: null __ansible_service_broker_registry_password: null diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 7ece0e061..dae17c3ce 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -10,20 +10,28 @@ l_use_crio: "{{ openshift_use_crio | default(False) }}" l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}" +- when: + - openshift_deployment_type == 'openshift-enterprise' + assert: + that: + - "openshift_image_tag is defined" + msg: > + openshift_image_tag is a required inventory variable when installing openshift-enterprise + - name: Use Package Docker if Requested include: package_docker.yml when: - - not l_use_system_container - - not l_use_crio_only + - not l_use_system_container + - not l_use_crio_only - name: Use System Container Docker if Requested include: systemcontainer_docker.yml when: - - l_use_system_container - - not l_use_crio_only + - l_use_system_container + - not l_use_crio_only - name: Add CRI-O usage Requested include: systemcontainer_crio.yml when: - - l_use_crio - - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] + - l_use_crio + - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 3e81d5c8e..eab5c3bb1 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -61,15 +61,25 @@ - stat: path=/etc/sysconfig/docker register: docker_check -- name: Comment old registry params in /etc/sysconfig/docker +- name: Set registry params lineinfile: dest: /etc/sysconfig/docker regexp: '^{{ item.reg_conf_var }}=.*$' - line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}" + line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" + when: + - item.reg_fact_val != [] + - docker_check.stat.isreg is defined + - docker_check.stat.isreg with_items: - reg_conf_var: ADD_REGISTRY + reg_fact_val: "{{ l2_docker_additional_registries }}" + reg_flag: --add-registry - reg_conf_var: BLOCK_REGISTRY + reg_fact_val: "{{ l2_docker_blocked_registries }}" + reg_flag: --block-registry - reg_conf_var: INSECURE_REGISTRY + reg_fact_val: "{{ l2_docker_insecure_registries }}" + reg_flag: --insecure-registry notify: - restart docker @@ -127,18 +137,6 @@ notify: - restart docker -- name: Check for credentials file for registry auth - stat: - path: "{{ docker_cli_auth_config_path }}/config.json" - when: oreg_auth_user is defined - register: docker_cli_auth_credentials_stat - -- name: Create credentials for docker cli registry auth - command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" - when: - - oreg_auth_user is defined - - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | 
bool - - name: Start the Docker service systemd: name: docker @@ -153,4 +151,16 @@ - set_fact: docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}" +- name: Check for credentials file for registry auth + stat: + path: "{{ docker_cli_auth_config_path }}/config.json" + when: oreg_auth_user is defined + register: docker_cli_auth_credentials_stat + +- name: Create credentials for docker cli registry auth + command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + - meta: flush_handlers diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 66ce475e1..386369d26 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -1,4 +1,5 @@ --- + # TODO: Much of this file is shared with container engine tasks - set_fact: l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" @@ -92,16 +93,24 @@ - block: - - name: Set to default prepend + - name: Set CRI-O image defaults set_fact: l_crio_image_prepend: "docker.io/gscrivano" l_crio_image_name: "cri-o-fedora" + l_crio_image_tag: "latest" - name: Use Centos based image when distribution is CentOS set_fact: l_crio_image_name: "cri-o-centos" when: ansible_distribution == "CentOS" + - name: Set CRI-O image tag + set_fact: + l_crio_image_tag: "{{ openshift_image_tag }}" + when: + - openshift_deployment_type == 'openshift-enterprise' + - openshift_image_tag is defined + - name: Use RHEL based image when distribution is Red Hat set_fact: l_crio_image_prepend: "registry.access.redhat.com/openshift3" @@ -110,7 +119,7 @@ - name: Set the full image name set_fact: - l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest" + l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}" # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548 - name: Use a specific image if requested diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 8b43393cb..5b1605b58 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -1,4 +1,5 @@ --- + # If docker_options are provided we should fail. We should not install docker and ignore # the users configuration. 
NOTE: docker_options == inventory:openshift_docker_options - name: Fail quickly if openshift_docker_options are set @@ -89,6 +90,14 @@ - name: Set to default prepend set_fact: l_docker_image_prepend: "gscrivano" + l_docker_image_tag: "latest" + + - name: Set container engine image tag + set_fact: + l_docker_image_tag: "{{ openshift_image_tag }}" + when: + - openshift_deployment_type == 'openshift-enterprise' + - openshift_image_tag is defined - name: Use Red Hat Registry for image when distribution is Red Hat set_fact: @@ -102,7 +111,7 @@ - name: Set the full image name set_fact: - l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest" + l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}" # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959 - name: Use a specific image if requested diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 1e6eb2386..05b2763d5 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -1421,7 +1421,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index 8c6a81cc8..d1dc4caf8 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -1399,7 +1399,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 4a7847e88..152f270ab 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index b8af5cad9..3082f5890 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git 
a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 3364f8de3..1ceaf5d0d 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index c64d7ffd2..0771aa5a5 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -1511,7 +1511,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1886,13 +1886,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -1909,6 +1911,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 492494bda..146f71f68 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1536,7 +1536,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -2230,13 +2230,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -2253,6 +2255,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index b412ca8af..9761b4b4e 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -1385,7 +1385,7 @@ class OpenShiftCLIConfig(object): for key in 
sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 8bbc22c49..047edffbb 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -1391,7 +1391,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index ad17051cb..0b6a8436b 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -1435,7 +1435,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 74a84ac89..1f52fba40 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -1402,7 +1402,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index eea1516ae..1b63a6c13 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -1375,7 +1375,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index dc33d3b8a..94b08d9ce 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -1394,7 +1394,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 88fd9554d..ad837fdb5 100644 --- 
a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -1411,7 +1411,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 8408f9ebc..892546e56 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -1414,7 +1414,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index d1be0b534..38df585f0 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -1346,7 +1346,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 9a281e6cd..70632f86d 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -1403,7 +1403,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index b503c330b..4eee748d7 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -1400,7 +1400,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 7a9e3bf89..2e73a7645 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, 
vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 875e473ad..e003770d8 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -90,6 +90,12 @@ options: required: false default: str aliases: [] + labels: + description: + - The labels to apply on the route + required: false + default: None + aliases: [] tls_termination: description: - The options for termination. e.g. reencrypt @@ -1445,7 +1451,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1469,6 +1475,7 @@ class RouteConfig(object): sname, namespace, kubeconfig, + labels=None, destcacert=None, cacert=None, cert=None, @@ -1483,6 +1490,7 @@ class RouteConfig(object): self.kubeconfig = kubeconfig self.name = sname self.namespace = namespace + self.labels = labels self.host = host self.tls_termination = tls_termination self.destcacert = destcacert @@ -1508,6 +1516,8 @@ class RouteConfig(object): self.data['metadata'] = {} self.data['metadata']['name'] = self.name self.data['metadata']['namespace'] = self.namespace + if self.labels: + self.data['metadata']['labels'] = self.labels self.data['spec'] = {} self.data['spec']['host'] = self.host @@ -1715,6 +1725,7 @@ class OCRoute(OpenShiftCLI): rconfig = RouteConfig(params['name'], params['namespace'], params['kubeconfig'], + params['labels'], files['destcacert']['value'], files['cacert']['value'], files['cert']['value'], @@ -1819,6 +1830,7 @@ def main(): state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), + labels=dict(default=None, type='dict'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), tls_termination=dict(default=None, type='str'), diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index ec3635753..c142f1f43 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -1389,7 +1389,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index c010607e8..0614f359d 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -90,6 +90,12 @@ options: required: false default: default aliases: [] + annotations: + description: + - Annotations to apply to the object + required: false + default: None + aliases: [] files: description: - A list of files provided for secrets @@ -1441,7 +1447,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if 
key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1464,13 +1470,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -1487,6 +1495,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): @@ -1698,8 +1708,7 @@ class OCSecret(OpenShiftCLI): elif params['contents']: files = Utils.create_tmp_files_from_contents(params['contents']) else: - return {'failed': True, - 'msg': 'Either specify files or contents.'} + files = [{'name': 'null', 'path': os.devnull}] ######## # Create @@ -1783,6 +1792,7 @@ def main(): debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), name=dict(default=None, type='str'), + annotations=dict(default=None, type='dict'), type=dict(default=None, type='str'), files=dict(default=None, type='list'), delete_after=dict(default=False, type='bool'), diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index e83a6e26d..3e8aea4f1 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -1448,7 +1448,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 0d46bbf96..646a39224 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 662d77ec1..99a8e8f3d 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index 574f109e4..e88f3ae8d 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ 
b/roles/lib_openshift/library/oc_storageclass.py @@ -1405,7 +1405,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index e430546ee..7bbe38819 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -1447,7 +1447,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index a12620968..63adbd6ac 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -1359,7 +1359,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 134b2ad19..3c07f8d4b 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -1436,7 +1436,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/src/ansible/oc_route.py b/roles/lib_openshift/src/ansible/oc_route.py index f2f5c5095..969cf8bcd 100644 --- a/roles/lib_openshift/src/ansible/oc_route.py +++ b/roles/lib_openshift/src/ansible/oc_route.py @@ -13,6 +13,7 @@ def main(): state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), + labels=dict(default=None, type='dict'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), tls_termination=dict(default=None, type='str'), diff --git a/roles/lib_openshift/src/ansible/oc_secret.py b/roles/lib_openshift/src/ansible/oc_secret.py index faa7c1772..ee2827e69 100644 --- a/roles/lib_openshift/src/ansible/oc_secret.py +++ b/roles/lib_openshift/src/ansible/oc_secret.py @@ -15,6 +15,7 @@ def main(): debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), name=dict(default=None, type='str'), + annotations=dict(default=None, type='dict'), type=dict(default=None, type='str'), files=dict(default=None, type='list'), delete_after=dict(default=False, type='bool'), diff --git a/roles/lib_openshift/src/class/oc_route.py 
b/roles/lib_openshift/src/class/oc_route.py index 3a1bd732f..dc2f7977b 100644 --- a/roles/lib_openshift/src/class/oc_route.py +++ b/roles/lib_openshift/src/class/oc_route.py @@ -118,6 +118,7 @@ class OCRoute(OpenShiftCLI): rconfig = RouteConfig(params['name'], params['namespace'], params['kubeconfig'], + params['labels'], files['destcacert']['value'], files['cacert']['value'], files['cert']['value'], diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py index 4ee6443e9..5322d6241 100644 --- a/roles/lib_openshift/src/class/oc_secret.py +++ b/roles/lib_openshift/src/class/oc_secret.py @@ -142,8 +142,7 @@ class OCSecret(OpenShiftCLI): elif params['contents']: files = Utils.create_tmp_files_from_contents(params['contents']) else: - return {'failed': True, - 'msg': 'Either specify files or contents.'} + files = [{'name': 'null', 'path': os.devnull}] ######## # Create diff --git a/roles/lib_openshift/src/doc/route b/roles/lib_openshift/src/doc/route index a12999c9e..f0d38ab5f 100644 --- a/roles/lib_openshift/src/doc/route +++ b/roles/lib_openshift/src/doc/route @@ -39,6 +39,12 @@ options: required: false default: str aliases: [] + labels: + description: + - The labels to apply on the route + required: false + default: None + aliases: [] tls_termination: description: - The options for termination. e.g. reencrypt diff --git a/roles/lib_openshift/src/doc/secret b/roles/lib_openshift/src/doc/secret index 76b147f6f..a27f90f38 100644 --- a/roles/lib_openshift/src/doc/secret +++ b/roles/lib_openshift/src/doc/secret @@ -39,6 +39,12 @@ options: required: false default: default aliases: [] + annotations: + description: + - Annotations to apply to the object + required: false + default: None + aliases: [] files: description: - A list of files provided for secrets diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 5a307cdb3..1fb32164e 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -597,7 +597,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/src/lib/route.py b/roles/lib_openshift/src/lib/route.py index 3b54a24fb..b106866cb 100644 --- a/roles/lib_openshift/src/lib/route.py +++ b/roles/lib_openshift/src/lib/route.py @@ -11,6 +11,7 @@ class RouteConfig(object): sname, namespace, kubeconfig, + labels=None, destcacert=None, cacert=None, cert=None, @@ -25,6 +26,7 @@ class RouteConfig(object): self.kubeconfig = kubeconfig self.name = sname self.namespace = namespace + self.labels = labels self.host = host self.tls_termination = tls_termination self.destcacert = destcacert @@ -50,6 +52,8 @@ class RouteConfig(object): self.data['metadata'] = {} self.data['metadata']['name'] = self.name self.data['metadata']['namespace'] = self.namespace + if self.labels: + self.data['metadata']['labels'] = self.labels self.data['spec'] = {} self.data['spec']['host'] = self.host diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py index a1c202442..ad4b6aa36 100644 --- a/roles/lib_openshift/src/lib/secret.py +++ b/roles/lib_openshift/src/lib/secret.py @@ -10,13 +10,15 @@ class SecretConfig(object): namespace, 
kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -33,6 +35,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py index afdb5e4dc..5699f123b 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_route.py +++ b/roles/lib_openshift/src/test/unit/test_oc_route.py @@ -39,6 +39,7 @@ class OCRouteTest(unittest.TestCase): 'debug': False, 'name': 'test', 'namespace': 'default', + 'labels': {'route': 'route'}, 'tls_termination': 'passthrough', 'dest_cacert_path': None, 'cacert_path': None, @@ -64,7 +65,10 @@ class OCRouteTest(unittest.TestCase): "selfLink": "/oapi/v1/namespaces/default/routes/test", "uid": "1b127c67-ecd9-11e6-96eb-0e0d9bdacd26", "resourceVersion": "439182", - "creationTimestamp": "2017-02-07T01:59:48Z" + "creationTimestamp": "2017-02-07T01:59:48Z", + "labels": { + "route": "route" + } }, "spec": { "host": "test.example", @@ -141,6 +145,7 @@ class OCRouteTest(unittest.TestCase): 'debug': False, 'name': 'test', 'namespace': 'default', + 'labels': {'route': 'route'}, 'tls_termination': 'edge', 'dest_cacert_path': None, 'cacert_path': None, @@ -166,7 +171,8 @@ class OCRouteTest(unittest.TestCase): "namespace": "default", "resourceVersion": "517745", "selfLink": "/oapi/v1/namespaces/default/routes/test", - "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a" + "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a", + "labels": {"route": "route"} }, "spec": { "host": "test.openshift.com", @@ -250,6 +256,7 @@ metadata: self.assertTrue(results['changed']) self.assertEqual(results['state'], 'present') self.assertEqual(results['results']['results'][0]['metadata']['name'], 'test') + self.assertEqual(results['results']['results'][0]['metadata']['labels']['route'], 'route') # Making sure our mock was called as we expected mock_cmd.assert_has_calls([ diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md index 696efbea5..ff96081fe 100644 --- a/roles/openshift_aws/README.md +++ b/roles/openshift_aws/README.md @@ -23,7 +23,6 @@ From this role: | openshift_aws_ami_copy_wait | False | openshift_aws_users | [] | openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }} -| openshift_aws_create_vpc | False | openshift_aws_node_group_type | master | openshift_aws_elb_cert_arn | '' | openshift_aws_kubernetes_cluster_status | owned @@ -72,7 +71,6 @@ Example Playbook vars: openshift_aws_clusterid: test openshift_aws_region: us-east-1 - openshift_aws_create_vpc: true ``` License diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index ca39c1aec..ea09857b0 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -1,5 +1,4 @@ --- -openshift_aws_create_vpc: True openshift_aws_create_s3: True openshift_aws_create_iam_cert: True openshift_aws_create_security_groups: True @@ -143,6 +142,11 @@ openshift_aws_elb_instance_filter: "tag:host-type": "{{ openshift_aws_node_group_type }}" instance-state-name: 
running +openshift_aws_launch_config_security_groups: +- "{{ openshift_aws_clusterid }}" # default sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + openshift_aws_node_security_groups: default: name: "{{ openshift_aws_clusterid }}" diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index 65c5a6cc0..e6be9969c 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -4,13 +4,18 @@ when: - openshift_aws_ami is undefined +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + - name: fetch the security groups for launch config ec2_group_facts: filters: - group-name: - - "{{ openshift_aws_clusterid }}" # default sg - - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg - - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + group-name: "{{ openshift_aws_launch_config_security_groups }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" region: "{{ openshift_aws_region }}" register: ec2sgs @@ -21,7 +26,7 @@ region: "{{ openshift_aws_region }}" image_id: "{{ openshift_aws_ami }}" instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}" - security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}" + security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" user_data: |- #cloud-config {% if openshift_aws_node_group_type != 'master' %} diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml new file mode 100644 index 000000000..737cfc7a6 --- /dev/null +++ b/roles/openshift_aws/tasks/master_facts.yml @@ -0,0 +1,22 @@ +--- +- name: fetch elbs + ec2_elb_facts: + region: "{{ openshift_aws_region }}" + names: + - "{{ item }}" + with_items: + - "{{ openshift_aws_elb_name }}-external" + - "{{ openshift_aws_elb_name }}-internal" + delegate_to: localhost + register: elbs + +- debug: var=elbs + +- name: set fact + set_fact: + openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" + osm_custom_cors_origins: + - "{{ elbs.results[1].elbs[0].dns_name }}" + - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" + with_items: "{{ groups['masters'] }}" diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index a2920b744..a8518d43a 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -1,16 +1,8 @@ --- -- when: openshift_aws_create_vpc | bool - name: create default vpc - include: vpc.yml - - when: openshift_aws_create_iam_cert | bool name: create the iam_cert for elb certificate include: iam_cert.yml -- when: openshift_aws_users | length > 0 - name: create aws ssh keypair - include: ssh_keys.yml - - when: openshift_aws_create_s3 | bool name: create s3 bucket for registry include: s3.yml diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/provision_instance.yml index 48555e5da..1384bae59 100644 --- a/roles/openshift_aws/tasks/build_ami.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -1,16 +1,4 
@@ --- -- when: openshift_aws_create_vpc | bool - name: create a vpc - include: vpc.yml - -- when: openshift_aws_users | length > 0 - name: create aws ssh keypair - include: ssh_keys.yml - -- when: openshift_aws_create_security_groups | bool - name: Create compute security_groups - include: security_group.yml - - name: query vpc ec2_vpc_net_facts: region: "{{ openshift_aws_region }}" @@ -33,7 +21,7 @@ key_name: "{{ openshift_aws_ssh_key_name }}" group: "{{ openshift_aws_build_ami_group }}" instance_type: m4.xlarge - vpc_subnet_id: "{{ subnetout.subnets[0].id }}" + vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}" image: "{{ openshift_aws_base_ami }}" volumes: - device_name: /dev/sdb @@ -46,3 +34,30 @@ Name: "{{ openshift_aws_base_ami_name }}" instance_tags: Name: "{{ openshift_aws_base_ami_name }}" + +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:Name": "{{ openshift_aws_base_ami_name }}" + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ instancesout.instances[0].public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + +- name: Pause 10 seconds to ensure ssh actually accepts logins + pause: + seconds: 20 + +- name: add host to nodes + add_host: + groups: nodes + name: "{{ instancesout.instances[0].public_dns_name }}" diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml new file mode 100644 index 000000000..166f3b938 --- /dev/null +++ b/roles/openshift_aws/tasks/setup_master_group.yml @@ -0,0 +1,35 @@ +--- +- name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid }}" + +- name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region }}" + +- name: fetch newly created instances + ec2_remote_facts: + region: "{{ openshift_aws_region }}" + filters: + "tag:clusterid": "{{ openshift_aws_clusterid }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + +- name: add new master to masters group + add_host: + groups: "masters,etcd,nodes" + name: "{{ item.public_dns_name }}" + hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}" + with_items: "{{ instancesout.instances }}" + +- name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_dns_name }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" diff --git a/roles/openshift_gcp/defaults/main.yml b/roles/openshift_gcp/defaults/main.yml new file mode 100644 index 000000000..18fc453b2 --- /dev/null +++ b/roles/openshift_gcp/defaults/main.yml @@ -0,0 +1,58 @@ +--- +openshift_gcp_prefix: '' + +openshift_gcp_create_network: True +openshift_gcp_create_registry_bucket: True +openshift_gcp_kubernetes_cluster_status: owned # or shared +openshift_gcp_node_group_type: master + +openshift_gcp_ssh_private_key: '' + +openshift_gcp_project: '' +openshift_gcp_clusterid: default +openshift_gcp_region: us-central1 +openshift_gcp_zone: us-central1-a + +openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network" + +openshift_gcp_iam_service_account: '' +openshift_gcp_iam_service_account_keyfile: '' + +openshift_gcp_master_lb_timeout: 2m + 
+openshift_gcp_infra_network_instance_group: ig-i + +openshift_gcp_image: 'rhel-7' +openshift_gcp_base_image: rhel-7 + +openshift_gcp_registry_bucket_keyfile: '' +openshift_gcp_registry_bucket_name: "{{ openshift_gcp_prefix }}-docker-registry" + +openshift_gcp_node_group_config: + - name: master + suffix: m + tags: ocp-master + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 1 + - name: infra + suffix: i + tags: ocp-infra-node ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 1 + - name: node + suffix: n + tags: ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 3 + - name: node-flex + suffix: nf + tags: ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 0 + +openshift_gcp_startup_script_file: '' +openshift_gcp_user_data_file: '' diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh index eacf84b4d..a7475aaf5 100644 --- a/roles/openshift_gcp/templates/dns.j2.sh +++ b/roles/openshift_gcp/templates/dns.j2.sh @@ -2,12 +2,12 @@ set -euo pipefail -dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" # Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist -if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null +if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null fi # Always output the expected nameservers as a comma delimited list -gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ',' +gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ',' diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh index e68e9683f..d72a11de1 100644 --- a/roles/openshift_gcp/templates/provision.j2.sh +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -2,36 +2,38 @@ set -euo pipefail -# Create SSH key for GCE -if [ ! -f "{{ gce_ssh_private_key }}" ]; then - ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N '' - ssh-add "{{ gce_ssh_private_key }}" || true -fi +if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then + # Create SSH key for GCE + if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then + ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N '' + ssh-add "{{ openshift_gcp_ssh_private_key }}" || true + fi -# Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there -pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub") -key_tmp_file='/tmp/ocp-gce-keys' -if ! 
gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then - if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then - gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" + # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there + pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + key_tmp_file='/tmp/ocp-gce-keys' + if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then + if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then + gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" + fi + echo -n 'cloud-user:' >> "$key_tmp_file" + cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file" + gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" + rm -f "$key_tmp_file" fi - echo -n 'cloud-user:' >> "$key_tmp_file" - cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file" - gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" - rm -f "$key_tmp_file" fi metadata="" -if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then - if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then - echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)" +if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then + if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then + echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)" exit 1 fi - metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}" + metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}" fi -if [[ -n "{{ provision_gce_user_data_file }}" ]]; then - if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then - echo "User data file missing at {{ provision_gce_user_data_file }}" +if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then + if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then + echo "User data file missing at {{ openshift_gcp_user_data_file }}" exit 1 fi if [[ -n "${metadata}" ]]; then @@ -39,14 +41,14 @@ if [[ -n "{{ provision_gce_user_data_file }}" ]]; then else metadata="--metadata-from-file=" fi - metadata+="user-data={{ provision_gce_user_data_file }}" + metadata+="user-data={{ openshift_gcp_user_data_file }}" fi # Select image or image family -image="{{ provision_gce_registered_image }}" -if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then - if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then - echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue'" +image="{{ openshift_gcp_image }}" +if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then + if ! 
gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then + echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'" exit 1 fi image="family/${image}" @@ -54,19 +56,19 @@ fi ### PROVISION THE INFRASTRUCTURE ### -dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" # Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers -if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then +if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script" exit 1 fi # Create network -if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto" +if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto" else - echo "Network '{{ gce_network_name }}' already exists" + echo "Network '{{ openshift_gcp_network_name }}' already exists" fi # Firewall rules in a form: @@ -87,56 +89,56 @@ declare -A FW_RULES=( ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node" ) for rule in "${!FW_RULES[@]}"; do - ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]} + ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]} else - echo "Firewall rule '{{ provision_prefix }}${rule}' already exists" + echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists" fi ) & done # Master IP -( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global else - echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists" fi ) & # Internal master IP -( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" +( if ! 
gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" else - echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists" fi ) & # Router IP -( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" else - echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists" fi ) & -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # configure {{ node_group.name }} ( - if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \ - --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \ - --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \ + if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \ + --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \ + --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ node_group.tags }}" \ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \ --image "${image}" ${metadata} else - echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists" + echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists" fi # Create instance group - if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \ - --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}" + if ! 
gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \ + --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}" else - echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists" + echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists" fi ) & {% endfor %} @@ -147,36 +149,36 @@ for i in `jobs -p`; do wait $i; done # Configure the master external LB rules ( # Master health check -if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz" +if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz" else - echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists" + echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists" fi -gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \ - --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}" +gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \ + --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}" # Master backend service -if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}" - gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}" +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}" + gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}" else - echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists" + echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists" fi # Master tcp proxy target -if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend" +if ! gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend" else - echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists" + echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists" fi # Master forwarding rule -if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)') - gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target" +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target" else - echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists" + echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists" fi ) & @@ -184,25 +186,25 @@ fi # Configure the master internal LB rules ( # Internal master health check -if ! 
gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz" +if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz" else - echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists" + echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists" fi # Internal master target pool -if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}" +if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}" else - echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists" + echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists" fi # Internal master forwarding rule -if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') - gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool" +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool" else - echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists" + echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists" fi ) & @@ -210,25 +212,25 @@ fi # Configure the infra node rules ( # Router health check -if ! 
gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz" +if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz" else - echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists" + echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists" fi # Router target pool -if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}" +if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}" else - echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists" + echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists" fi # Router forwarding rule -if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') - gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool" +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool" else - echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists" + echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists" fi ) & @@ -236,11 +238,11 @@ for i in `jobs -p`; do wait $i; done # set the target pools ( -if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then - gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}" +if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}" else - gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}" - gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}" + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}" + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}" fi ) & @@ -252,42 +254,42 @@ while true; do rm -f $dns # DNS record for master lb - if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)') + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)') if [[ ! 
-f $dns ]]; then - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" fi - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP" else echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists" fi # DNS record for internal master lb - if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') if [[ ! -f $dns ]]; then - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" fi - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP" else echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists" fi # DNS record for router lb - if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') if [[ ! -f $dns ]]; then - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" fi - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." 
--type A "$IP" - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}." + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}." else echo "DNS record for '{{ wildcard_zone }}' already exists" fi # Commit all DNS changes, retrying if preconditions are not met if [[ -f $dns ]]; then - if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then + if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then rc=$? if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then continue @@ -301,17 +303,17 @@ done # Create bucket for registry ( -if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then - gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" +if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then + gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}" else - echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists" + echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists" fi ) & # wait until all node groups are stable -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # wait for stable {{ node_group.name }} -( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) & +( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) & {% endfor %} diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh index 41ceab2b5..a1e0affec 100644 --- a/roles/openshift_gcp/templates/remove.j2.sh +++ b/roles/openshift_gcp/templates/remove.j2.sh @@ -18,8 +18,8 @@ function teardown_cmd() { if [[ -z "${found}" ]]; then flag=$((flag+1)) fi - if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then - gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag} + if gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag} fi } @@ -33,11 +33,11 @@ function teardown() { } # Preemptively spin down the instances -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # scale down {{ node_group.name }} ( # performs a delete and scale down as one operation to ensure maximum parallelism - if ! 
instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then + if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' ); then exit 0 fi instances="${instances%?}" @@ -45,7 +45,7 @@ function teardown() { echo "warning: No instances in {{ node_group.name }}" 1>&2 exit 0 fi - if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then + if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed delete-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --instances "${instances}"; then echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2 exit 0 fi @@ -54,15 +54,15 @@ function teardown() { # Bucket for registry ( -if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then - gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" +if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then + gsutil -m rm -r "gs://{{ openshift_gcp_registry_bucket_name }}" fi ) & # DNS ( -dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" -if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" +if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then # Retry DNS changes until they succeed since this may be a shared resource while true; do dns="${TMPDIR:-/tmp}/dns.yaml" @@ -70,16 +70,16 @@ if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zon # export all dns records that match into a zone format, and turn each line into a set of args for # record-sets transaction. - gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}" + gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}" if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \ awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input" then rm -f "${dns}" - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" - cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + cat "${dns}.input" | xargs -L1 gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}" # Commit all DNS changes, retrying if preconditions are not met - if ! 
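
The teardown template above preemptively scales groups down by listing the managed-instance-group members and deleting them in a single call, treating a failed listing or an empty group as "nothing to do". A hedged Python sketch of that flow (names illustrative, gcloud assumed available):

# Sketch of the "list members, then delete them in one call" scale-down
# used by remove.j2.sh.
import subprocess

def scale_down_group(project, group, zone):
    listing = subprocess.run(
        ["gcloud", "--project", project, "compute", "instance-groups",
         "managed", "list-instances", group, "--zone", zone,
         "--format", 'value[terminator=","](instance)'],
        capture_output=True, text=True,
    )
    if listing.returncode != 0:
        return  # group is already gone; nothing to scale down
    instances = listing.stdout.strip().rstrip(",")
    if not instances:
        print("warning: no instances in %s" % group)
        return
    subprocess.run(
        ["gcloud", "--project", project, "compute", "instance-groups",
         "managed", "delete-instances", group, "--zone", zone,
         "--instances", instances],
        check=False,  # a failed scale-down is only a warning in the template
    )
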
out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then + if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then rc=$? if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then continue @@ -95,25 +95,25 @@ fi ( # Router network rules -teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}" -teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}" -teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks -teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}" +teardown "{{ openshift_gcp_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}router-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}router-network-lb-health-check" compute http-health-checks +teardown "{{ openshift_gcp_prefix }}router-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}" # Internal master network rules -teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}" -teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}" -teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks -teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}" +teardown "{{ openshift_gcp_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}master-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}master-network-lb-health-check" compute http-health-checks +teardown "{{ openshift_gcp_prefix }}master-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}" ) & ( # Master SSL network rules -teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global -teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies -teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global -teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global -teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-rule" compute forwarding-rules --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-target" compute target-tcp-proxies +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-backend" compute backend-services --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" compute health-checks ) & #Firewall rules @@ -130,10 +130,10 @@ declare -A FW_RULES=( ['infra-node-external']="" ) for rule in "${!FW_RULES[@]}"; do - ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then + ( if gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then # retry a few times because this call can be flaky for i 
in `seq 1 3`; do - if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then + if gcloud -q --project "{{ openshift_gcp_project }}" compute firewall-rules delete "{{ openshift_gcp_prefix }}$rule"; then break fi done @@ -142,15 +142,15 @@ done for i in `jobs -p`; do wait $i; done -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # teardown {{ node_group.name }} - any load balancers referencing these groups must be removed ( - teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}" - teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates + teardown "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ openshift_gcp_zone }}" + teardown "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" compute instance-templates ) & {% endfor %} for i in `jobs -p`; do wait $i; done # Network -teardown "{{ gce_network_name }}" compute networks +teardown "{{ openshift_gcp_network_name }}" compute networks diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index cdf56e959..7956559c6 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -15,31 +15,31 @@ class DiskAvailability(OpenShiftCheck): # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements recommended_disk_space_bytes = { '/var': { - 'masters': 40 * 10**9, - 'nodes': 15 * 10**9, - 'etcd': 20 * 10**9, + 'oo_masters_to_config': 40 * 10**9, + 'oo_nodes_to_config': 15 * 10**9, + 'oo_etcd_to_config': 20 * 10**9, }, # Used to copy client binaries into, # see roles/openshift_cli/library/openshift_container_binary_sync.py. '/usr/local/bin': { - 'masters': 1 * 10**9, - 'nodes': 1 * 10**9, - 'etcd': 1 * 10**9, + 'oo_masters_to_config': 1 * 10**9, + 'oo_nodes_to_config': 1 * 10**9, + 'oo_etcd_to_config': 1 * 10**9, }, # Used as temporary storage in several cases. 
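
The disk_availability change above is a straight rename from the old group names (masters, nodes, etcd) to the oo_*_to_config groups the playbooks actually use, so hosts are matched against the right requirements again. A minimal sketch of how a per-path recommendation resolves against those group names, assuming the strictest (largest) matching value wins:

# Requirements keyed by the new oo_*_to_config group names; a host in
# several groups gets the largest recommendation.
RECOMMENDED_VAR_BYTES = {
    'oo_masters_to_config': 40 * 10**9,
    'oo_nodes_to_config': 15 * 10**9,
    'oo_etcd_to_config': 20 * 10**9,
}

def required_bytes(group_names, recommendations=RECOMMENDED_VAR_BYTES):
    matching = [recommendations[g] for g in group_names if g in recommendations]
    return max(matching) if matching else 0

assert required_bytes(['oo_nodes_to_config', 'oo_masters_to_config']) == 40 * 10**9
assert required_bytes(['lb']) == 0
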
tempfile.gettempdir(): { - 'masters': 1 * 10**9, - 'nodes': 1 * 10**9, - 'etcd': 1 * 10**9, + 'oo_masters_to_config': 1 * 10**9, + 'oo_nodes_to_config': 1 * 10**9, + 'oo_etcd_to_config': 1 * 10**9, }, } # recommended disk space for each location under an upgrade context recommended_disk_upgrade_bytes = { '/var': { - 'masters': 10 * 10**9, - 'nodes': 5 * 10 ** 9, - 'etcd': 5 * 10 ** 9, + 'oo_masters_to_config': 10 * 10**9, + 'oo_nodes_to_config': 5 * 10 ** 9, + 'oo_etcd_to_config': 5 * 10 ** 9, }, } @@ -61,9 +61,9 @@ class DiskAvailability(OpenShiftCheck): number = float(user_config) user_config = { '/var': { - 'masters': number, - 'nodes': number, - 'etcd': number, + 'oo_masters_to_config': number, + 'oo_nodes_to_config': number, + 'oo_etcd_to_config': number, }, } except TypeError: diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 93a5973d4..63ccadcd1 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -1,5 +1,6 @@ """Check that required Docker images are available.""" +from ansible.module_utils import six from openshift_checks import OpenShiftCheck from openshift_checks.mixins import DockerHostMixin @@ -113,7 +114,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # template for images that run on top of OpenShift image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}") image_url = self.get_var("oreg_url", default="") or image_url - if 'nodes' in host_groups: + if 'oo_nodes_to_config' in host_groups: for suffix in NODE_IMAGE_SUFFIXES: required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag)) # The registry-console is for some reason not prefixed with ose- like the other components. @@ -124,13 +125,13 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): # images for containerized components if self.get_var("openshift", "common", "is_containerized"): components = set() - if 'nodes' in host_groups: + if 'oo_nodes_to_config' in host_groups: components.update(["node", "openvswitch"]) - if 'masters' in host_groups: # name is "origin" or "ose" + if 'oo_masters_to_config' in host_groups: # name is "origin" or "ose" components.add(image_info["name"]) for component in components: required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag)) - if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise + if 'oo_etcd_to_config' in host_groups: # special case, note it is the same for origin/enterprise required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag return required @@ -153,7 +154,15 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): def known_docker_registries(self): """Build a list of docker registries available according to inventory vars.""" - regs = list(self.get_var("openshift_docker_additional_registries", default=[])) + regs = self.get_var("openshift_docker_additional_registries", default=[]) + # https://bugzilla.redhat.com/show_bug.cgi?id=1497274 + # if the result was a string type, place it into a list. We must do this + # as using list() on a string will split the string into its characters. 
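
The registries fix referenced above (bug 1497274) guards against openshift_docker_additional_registries arriving as a bare string, because list() on a string explodes it into characters. A small standalone illustration; the real check uses six.string_types for Python 2/3 compatibility, plain str is enough for a Python 3 sketch:

# Why the isinstance check matters for a value like
# openshift_docker_additional_registries=registry.example.com
def normalize_registries(regs):
    if isinstance(regs, str):
        return [regs]        # single registry given as a plain string
    return list(regs)        # already a list/tuple of registries

assert list("registry.example.com")[:3] == ["r", "e", "g"]   # the bug
assert normalize_registries("registry.example.com") == ["registry.example.com"]
assert normalize_registries(["a.example.com", "b.example.com"]) == ["a.example.com", "b.example.com"]
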
+ if isinstance(regs, six.string_types): + regs = [regs] + else: + # Otherwise cast to a list as was done previously + regs = list(regs) deployment_type = self.get_var("openshift_deployment_type") if deployment_type == "origin" and "docker.io" not in regs: diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py index b4c8957e9..8b20ccb49 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py @@ -12,7 +12,7 @@ class EtcdTraffic(OpenShiftCheck): def is_active(self): """Skip hosts that do not have etcd in their group names.""" group_names = self.get_var("group_names", default=[]) - valid_group_names = "etcd" in group_names + valid_group_names = "oo_etcd_to_config" in group_names version = self.get_major_minor_version(self.get_var("openshift_image_tag")) valid_version = version in ((3, 4), (3, 5)) diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py index 79955cb2f..3d75da6f9 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py @@ -15,7 +15,11 @@ class EtcdVolume(OpenShiftCheck): etcd_mount_path = "/var/lib/etcd" def is_active(self): - etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or [] + etcd_hosts = ( + self.get_var("groups", "oo_etcd_to_config", default=[]) or + self.get_var("groups", "oo_masters_to_config", default=[]) or + [] + ) is_etcd_host = self.get_var("ansible_host") in etcd_hosts return super(EtcdVolume, self).is_active() and is_etcd_host diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py index d783e6760..e93cc9028 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py +++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py @@ -46,7 +46,7 @@ class FluentdConfig(LoggingCheck): # if check is running on a master, retrieve all running pods # and check any pod's container for the env var "USE_JOURNAL" group_names = self.get_var("group_names") - if "masters" in group_names: + if "oo_masters_to_config" in group_names: use_journald = self.check_fluentd_env_var() docker_info = self.execute_module("docker_info", {}) diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py index 765ba072d..e7a8ec976 100644 --- a/roles/openshift_health_checker/openshift_checks/memory_availability.py +++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py @@ -14,9 +14,9 @@ class MemoryAvailability(OpenShiftCheck): # Values taken from the official installation documentation: # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements recommended_memory_bytes = { - "masters": 16 * GIB, - "nodes": 8 * GIB, - "etcd": 8 * GIB, + "oo_masters_to_config": 16 * GIB, + "oo_nodes_to_config": 8 * GIB, + "oo_etcd_to_config": 8 * GIB, } # https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal memtotal_adjustment = 1 * GIB diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py index 
b90ebf6dd..cfbdea303 100644 --- a/roles/openshift_health_checker/openshift_checks/mixins.py +++ b/roles/openshift_health_checker/openshift_checks/mixins.py @@ -21,9 +21,11 @@ class DockerHostMixin(object): def is_active(self): """Only run on hosts that depend on Docker.""" - is_containerized = self.get_var("openshift", "common", "is_containerized") - is_node = "nodes" in self.get_var("group_names", default=[]) - return super(DockerHostMixin, self).is_active() and (is_containerized or is_node) + group_names = set(self.get_var("group_names", default=[])) + needs_docker = set(["oo_nodes_to_config"]) + if self.get_var("openshift.common.is_containerized"): + needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"]) + return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker)) def ensure_dependencies(self): """ diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py index 363c12def..416805c4d 100644 --- a/roles/openshift_health_checker/openshift_checks/ovs_version.py +++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py @@ -24,7 +24,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck): def is_active(self): """Skip hosts that do not have package requirements.""" group_names = self.get_var("group_names", default=[]) - master_or_node = 'masters' in group_names or 'nodes' in group_names + master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names return super(OvsVersion, self).is_active() and master_or_node def run(self): diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py index 21355c2f0..090e438ff 100644 --- a/roles/openshift_health_checker/openshift_checks/package_availability.py +++ b/roles/openshift_health_checker/openshift_checks/package_availability.py @@ -20,9 +20,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck): packages = set() - if "masters" in group_names: + if "oo_masters_to_config" in group_names: packages.update(self.master_packages(rpm_prefix)) - if "nodes" in group_names: + if "oo_nodes_to_config" in group_names: packages.update(self.node_packages(rpm_prefix)) args = {"packages": sorted(set(packages))} diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index d4aec3ed8..2f09b22fc 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -36,7 +36,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): def is_active(self): """Skip hosts that do not have package requirements.""" group_names = self.get_var("group_names", default=[]) - master_or_node = 'masters' in group_names or 'nodes' in group_names + master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names return super(PackageVersion, self).is_active() and master_or_node def run(self): diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py index 9ae679b79..29a325a17 100644 --- a/roles/openshift_health_checker/test/disk_availability_test.py +++ b/roles/openshift_health_checker/test/disk_availability_test.py @@ -4,11 +4,11 @@ from openshift_checks.disk_availability import DiskAvailability, 
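
The rewritten DockerHostMixin.is_active above expresses "who needs Docker" as a set: nodes always do, and masters plus etcd only when the install is containerized. A standalone sketch of the same decision:

# Standalone version of the new DockerHostMixin.is_active logic.
def needs_docker(group_names, is_containerized):
    groups = set(group_names)
    docker_groups = {"oo_nodes_to_config"}
    if is_containerized:
        docker_groups.update({"oo_masters_to_config", "oo_etcd_to_config"})
    return bool(groups & docker_groups)

assert needs_docker(["oo_nodes_to_config"], False)
assert not needs_docker(["oo_masters_to_config"], False)
assert needs_docker(["oo_etcd_to_config"], True)
assert not needs_docker(["lb", "nfs"], True)
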
OpenShiftCheckE @pytest.mark.parametrize('group_names,is_active', [ - (['masters'], True), - (['nodes'], True), - (['etcd'], True), - (['masters', 'nodes'], True), - (['masters', 'etcd'], True), + (['oo_masters_to_config'], True), + (['oo_nodes_to_config'], True), + (['oo_etcd_to_config'], True), + (['oo_masters_to_config', 'oo_nodes_to_config'], True), + (['oo_masters_to_config', 'oo_etcd_to_config'], True), ([], False), (['lb'], False), (['nfs'], False), @@ -39,7 +39,7 @@ def test_is_active(group_names, is_active): ]) def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks): task_vars = dict( - group_names=['masters'], + group_names=['oo_masters_to_config'], ansible_mounts=ansible_mounts, ) @@ -52,7 +52,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks): @pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [ ( - ['masters'], + ['oo_masters_to_config'], 0, [{ 'mount': '/', @@ -60,7 +60,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks): }], ), ( - ['nodes'], + ['oo_nodes_to_config'], 0, [{ 'mount': '/', @@ -68,7 +68,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks): }], ), ( - ['etcd'], + ['oo_etcd_to_config'], 0, [{ 'mount': '/', @@ -76,7 +76,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks): }], ), ( - ['etcd'], + ['oo_etcd_to_config'], 1, # configure lower threshold [{ 'mount': '/', @@ -84,7 +84,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks): }], ), ( - ['etcd'], + ['oo_etcd_to_config'], 0, [{ # not enough space on / ... @@ -112,7 +112,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib @pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [ ( 'test with no space available', - ['masters'], + ['oo_masters_to_config'], 0, [{ 'mount': '/', @@ -122,7 +122,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib ), ( 'test with a higher configured required value', - ['masters'], + ['oo_masters_to_config'], 100, # set a higher threshold [{ 'mount': '/', @@ -132,7 +132,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib ), ( 'test with 1GB available, but "0" GB space requirement', - ['nodes'], + ['oo_nodes_to_config'], 0, [{ 'mount': '/', @@ -142,7 +142,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib ), ( 'test with no space available, but "0" GB space requirement', - ['etcd'], + ['oo_etcd_to_config'], 0, [{ 'mount': '/', @@ -152,7 +152,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib ), ( 'test with enough space for a node, but not for a master', - ['nodes', 'masters'], + ['oo_nodes_to_config', 'oo_masters_to_config'], 0, [{ 'mount': '/', @@ -162,7 +162,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib ), ( 'test failure with enough space on "/", but not enough on "/var"', - ['etcd'], + ['oo_etcd_to_config'], 0, [{ # enough space on / ... 
@@ -194,7 +194,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a @pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [ ( 'test without enough space for master under "upgrade" context', - ['nodes', 'masters'], + ['oo_nodes_to_config', 'oo_masters_to_config'], "upgrade", [{ 'mount': '/', @@ -206,7 +206,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a ), ( 'test with enough space for master under "upgrade" context', - ['nodes', 'masters'], + ['oo_nodes_to_config', 'oo_masters_to_config'], "upgrade", [{ 'mount': '/', @@ -218,7 +218,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a ), ( 'test with not enough space for master, and non-upgrade context', - ['nodes', 'masters'], + ['oo_nodes_to_config', 'oo_masters_to_config'], "health", [{ 'mount': '/', diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index c523ffd5c..43dcf0c9a 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -16,19 +16,19 @@ def task_vars(): ), openshift_deployment_type='origin', openshift_image_tag='', - group_names=['nodes', 'masters'], + group_names=['oo_nodes_to_config', 'oo_masters_to_config'], ) @pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [ - ("origin", True, [], True), - ("openshift-enterprise", True, [], True), ("invalid", True, [], False), ("", True, [], False), ("origin", False, [], False), ("openshift-enterprise", False, [], False), - ("origin", False, ["nodes", "masters"], True), - ("openshift-enterprise", False, ["etcd"], False), + ("origin", False, ["oo_nodes_to_config", "oo_masters_to_config"], True), + ("openshift-enterprise", False, ["oo_etcd_to_config"], False), + ("origin", True, ["nfs"], False), + ("openshift-enterprise", True, ["lb"], False), ]) def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active): task_vars['openshift_deployment_type'] = deployment_type @@ -126,7 +126,7 @@ def test_no_known_registries(): openshift_docker_additional_registries=["docker.io"], openshift_deployment_type="openshift-enterprise", openshift_image_tag='latest', - group_names=['nodes', 'masters'], + group_names=['oo_nodes_to_config', 'oo_masters_to_config'], )) dia.known_docker_registries = mock_known_docker_registries actual = dia.run() @@ -205,7 +205,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo @pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [ ( # standard set of stuff required on nodes - "origin", False, ['nodes'], None, + "origin", False, ['oo_nodes_to_config'], None, set([ 'openshift/origin-pod:vtest', 'openshift/origin-deployer:vtest', @@ -215,7 +215,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo ]) ), ( # set a different URL for images - "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}', + "origin", False, ['oo_nodes_to_config'], 'foo.io/openshift/origin-${component}:${version}', set([ 'foo.io/openshift/origin-pod:vtest', 'foo.io/openshift/origin-deployer:vtest', @@ -225,7 +225,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo ]) ), ( - "origin", True, ['nodes', 'masters', 'etcd'], None, + "origin", True, 
['oo_nodes_to_config', 'oo_masters_to_config', 'oo_etcd_to_config'], None, set([ # images running on top of openshift 'openshift/origin-pod:vtest', @@ -241,7 +241,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo ]) ), ( # enterprise images - "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45', + "openshift-enterprise", True, ['oo_nodes_to_config'], 'foo.io/openshift3/ose-${component}:f13ac45', set([ 'foo.io/openshift3/ose-pod:f13ac45', 'foo.io/openshift3/ose-deployer:f13ac45', @@ -255,7 +255,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo ]) ), ( - "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45', + "openshift-enterprise", True, ['oo_etcd_to_config', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45', set([ 'registry.access.redhat.com/rhel7/etcd', # lb does not yet come in a containerized version @@ -288,7 +288,7 @@ def test_containerized_etcd(): ), ), openshift_deployment_type="origin", - group_names=['etcd'], + group_names=['oo_etcd_to_config'], ) expected = set(['registry.access.redhat.com/rhel7/etcd']) assert expected == DockerImageAvailability(task_vars=task_vars).required_images() diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py index e0dccc062..8fa68c378 100644 --- a/roles/openshift_health_checker/test/docker_storage_test.py +++ b/roles/openshift_health_checker/test/docker_storage_test.py @@ -5,9 +5,9 @@ from openshift_checks.docker_storage import DockerStorage @pytest.mark.parametrize('is_containerized, group_names, is_active', [ - (False, ["masters", "etcd"], False), - (False, ["masters", "nodes"], True), - (True, ["etcd"], True), + (False, ["oo_masters_to_config", "oo_etcd_to_config"], False), + (False, ["oo_masters_to_config", "oo_nodes_to_config"], True), + (True, ["oo_etcd_to_config"], True), ]) def test_is_active(is_containerized, group_names, is_active): task_vars = dict( diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py index fae3e578d..dd6f4ad81 100644 --- a/roles/openshift_health_checker/test/etcd_traffic_test.py +++ b/roles/openshift_health_checker/test/etcd_traffic_test.py @@ -4,14 +4,14 @@ from openshift_checks.etcd_traffic import EtcdTraffic @pytest.mark.parametrize('group_names,version,is_active', [ - (['masters'], "3.5", False), - (['masters'], "3.6", False), - (['nodes'], "3.4", False), - (['etcd'], "3.4", True), - (['etcd'], "1.5", True), - (['etcd'], "3.1", False), - (['masters', 'nodes'], "3.5", False), - (['masters', 'etcd'], "3.5", True), + (['oo_masters_to_config'], "3.5", False), + (['oo_masters_to_config'], "3.6", False), + (['oo_nodes_to_config'], "3.4", False), + (['oo_etcd_to_config'], "3.4", True), + (['oo_etcd_to_config'], "1.5", True), + (['oo_etcd_to_config'], "3.1", False), + (['oo_masters_to_config', 'oo_nodes_to_config'], "3.5", False), + (['oo_masters_to_config', 'oo_etcd_to_config'], "3.5", True), ([], "3.4", False), ]) def test_is_active(group_names, version, is_active): @@ -23,9 +23,9 @@ def test_is_active(group_names, version, is_active): @pytest.mark.parametrize('group_names,matched,failed,extra_words', [ - (["masters"], True, True, ["Higher than normal", "traffic"]), - (["masters", "etcd"], False, False, []), - (["etcd"], False, False, []), + (["oo_masters_to_config"], True, True, ["Higher than normal", "traffic"]), + 
(["oo_masters_to_config", "oo_etcd_to_config"], False, False, []), + (["oo_etcd_to_config"], False, False, []), ]) def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words): def execute_module(module_name, *_): diff --git a/roles/openshift_health_checker/test/fluentd_config_test.py b/roles/openshift_health_checker/test/fluentd_config_test.py index 10db253bc..b5b4858d6 100644 --- a/roles/openshift_health_checker/test/fluentd_config_test.py +++ b/roles/openshift_health_checker/test/fluentd_config_test.py @@ -82,7 +82,7 @@ def test_check_logging_config_non_master(name, use_journald, logging_driver, ext return {} task_vars = dict( - group_names=["nodes", "etcd"], + group_names=["oo_nodes_to_config", "oo_etcd_to_config"], openshift_logging_fluentd_use_journal=use_journald, openshift=dict( common=dict(config_base=""), @@ -128,7 +128,7 @@ def test_check_logging_config_non_master_failed(name, use_journald, logging_driv return {} task_vars = dict( - group_names=["nodes", "etcd"], + group_names=["oo_nodes_to_config", "oo_etcd_to_config"], openshift_logging_fluentd_use_journal=use_journald, openshift=dict( common=dict(config_base=""), @@ -192,7 +192,7 @@ def test_check_logging_config_master(name, pods, logging_driver, extra_words): return {} task_vars = dict( - group_names=["masters"], + group_names=["oo_masters_to_config"], openshift=dict( common=dict(config_base=""), ), @@ -274,7 +274,7 @@ def test_check_logging_config_master_failed(name, pods, logging_driver, words): return {} task_vars = dict( - group_names=["masters"], + group_names=["oo_masters_to_config"], openshift=dict( common=dict(config_base=""), ), @@ -331,7 +331,7 @@ def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods, return {} task_vars = dict( - group_names=["masters"], + group_names=["oo_masters_to_config"], openshift=dict( common=dict(config_base=""), ), diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py index aee2f0416..5ec83dd79 100644 --- a/roles/openshift_health_checker/test/memory_availability_test.py +++ b/roles/openshift_health_checker/test/memory_availability_test.py @@ -4,11 +4,11 @@ from openshift_checks.memory_availability import MemoryAvailability @pytest.mark.parametrize('group_names,is_active', [ - (['masters'], True), - (['nodes'], True), - (['etcd'], True), - (['masters', 'nodes'], True), - (['masters', 'etcd'], True), + (['oo_masters_to_config'], True), + (['oo_nodes_to_config'], True), + (['oo_etcd_to_config'], True), + (['oo_masters_to_config', 'oo_nodes_to_config'], True), + (['oo_masters_to_config', 'oo_etcd_to_config'], True), ([], False), (['lb'], False), (['nfs'], False), @@ -22,32 +22,32 @@ def test_is_active(group_names, is_active): @pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [ ( - ['masters'], + ['oo_masters_to_config'], 0, 17200, ), ( - ['nodes'], + ['oo_nodes_to_config'], 0, 8200, ), ( - ['nodes'], + ['oo_nodes_to_config'], 1, # configure lower threshold 2000, # too low for recommended but not for configured ), ( - ['nodes'], + ['oo_nodes_to_config'], 2, # configure threshold where adjustment pushes it over 1900, ), ( - ['etcd'], + ['oo_etcd_to_config'], 0, 8200, ), ( - ['masters', 'nodes'], + ['oo_masters_to_config', 'oo_nodes_to_config'], 0, 17000, ), @@ -66,43 +66,43 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m 
@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb,extra_words', [ ( - ['masters'], + ['oo_masters_to_config'], 0, 0, ['0.0 GiB'], ), ( - ['nodes'], + ['oo_nodes_to_config'], 0, 100, ['0.1 GiB'], ), ( - ['nodes'], + ['oo_nodes_to_config'], 24, # configure higher threshold 20 * 1024, # enough to meet recommended but not configured ['20.0 GiB'], ), ( - ['nodes'], + ['oo_nodes_to_config'], 24, # configure higher threshold 22 * 1024, # not enough for adjustment to push over threshold ['22.0 GiB'], ), ( - ['etcd'], + ['oo_etcd_to_config'], 0, 6 * 1024, ['6.0 GiB'], ), ( - ['etcd', 'masters'], + ['oo_etcd_to_config', 'oo_masters_to_config'], 0, 9 * 1024, # enough memory for etcd, not enough for a master ['9.0 GiB'], ), ( - ['nodes', 'masters'], + ['oo_nodes_to_config', 'oo_masters_to_config'], 0, # enough memory for a node, not enough for a master 11 * 1024, diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index 602f32989..5a82a43bf 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -67,14 +67,14 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): @pytest.mark.parametrize('group_names,is_containerized,is_active', [ - (['masters'], False, True), + (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs - (['masters'], True, False), - (['nodes'], False, True), - (['masters', 'nodes'], False, True), - (['masters', 'etcd'], False, True), + (['oo_masters_to_config'], True, False), + (['oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_etcd_to_config'], False, True), ([], False, False), - (['etcd'], False, False), + (['oo_etcd_to_config'], False, False), (['lb'], False, False), (['nfs'], False, False), ]) diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index b34e8fbfc..9815acb38 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -26,7 +26,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='origin')), - group_names=['masters'], + group_names=['oo_masters_to_config'], ), set(['origin-master']), set(['origin-node']), @@ -34,7 +34,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='atomic-openshift')), - group_names=['nodes'], + group_names=['oo_nodes_to_config'], ), set(['atomic-openshift-node']), set(['atomic-openshift-master']), @@ -42,7 +42,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ( dict( openshift=dict(common=dict(service_type='atomic-openshift')), - group_names=['masters', 'nodes'], + group_names=['oo_masters_to_config', 'oo_nodes_to_config'], ), set(['atomic-openshift-master', 'atomic-openshift-node']), set(), diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index 8564cd4db..3cf4ce033 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -97,14 +97,14 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc 
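
Several of the memory test cases above lean on the 1 GiB memtotal adjustment: the RAM Ansible reports is a bit below the installed amount, so the check adds the adjustment before comparing. A hedged sketch of that comparison, assuming the largest recommendation among the host's groups applies:

# Sketch of the comparison exercised by the memory_availability tests.
GIB = 1024**3
RECOMMENDED = {
    "oo_masters_to_config": 16 * GIB,
    "oo_nodes_to_config": 8 * GIB,
    "oo_etcd_to_config": 8 * GIB,
}
MEMTOTAL_ADJUSTMENT = 1 * GIB  # compensates for kernel-reserved RAM

def has_enough_memory(group_names, ansible_memtotal_mb):
    required = max((RECOMMENDED[g] for g in group_names if g in RECOMMENDED),
                   default=0)
    available = ansible_memtotal_mb * 1024**2 + MEMTOTAL_ADJUSTMENT
    return available >= required

assert has_enough_memory(["oo_nodes_to_config"], 8200)             # node is fine
assert not has_enough_memory(["oo_masters_to_config"], 9 * 1024)   # master is not
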
@pytest.mark.parametrize('group_names,is_containerized,is_active', [ - (['masters'], False, True), + (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs - (['masters'], True, False), - (['nodes'], False, True), - (['masters', 'nodes'], False, True), - (['masters', 'etcd'], False, True), + (['oo_masters_to_config'], True, False), + (['oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_nodes_to_config'], False, True), + (['oo_masters_to_config', 'oo_etcd_to_config'], False, True), ([], False, False), - (['etcd'], False, False), + (['oo_etcd_to_config'], False, False), (['lb'], False, False), (['nfs'], False, False), ]) diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 53d1a8bc7..47dc9171d 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -1,9 +1,11 @@ --- +# openshift_*_selector variables have been deprecated in favor of +# openshift_hosted_*_selector variables. - set_fact: - openshift_hosted_router_selector: "{{ openshift_hosted_infra_selector }}" + openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined - set_fact: - openshift_hosted_registry_selector: "{{ openshift_hosted_infra_selector }}" + openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined - name: Set hosted facts diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 5574a1446..6e7e2557f 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -8,7 +8,6 @@ openshift_logging_labels: {} openshift_logging_label_key: "" openshift_logging_label_value: "" openshift_logging_install_logging: False -openshift_logging_uninstall_logging: False openshift_logging_purge_logging: False openshift_logging_image_pull_secret: "" @@ -95,7 +94,7 @@ openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynami openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}" openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}" openshift_logging_es_recover_after_time: 5m -openshift_logging_es_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_storage_group: "65534" openshift_logging_es_nodeselector: {} # openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml openshift_logging_es_config: {} @@ -134,7 +133,7 @@ openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pv openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}" openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" openshift_logging_es_ops_recover_after_time: 5m -openshift_logging_es_ops_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_ops_storage_group: "65534" openshift_logging_es_ops_nodeselector: {} # for exposing es-ops to external (outside of the cluster) clients diff --git 
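
The openshift_hosted_facts change above restores the deprecated openshift_router_selector and openshift_registry_selector variables as fallbacks now that the filter-plugin migration was dropped. The effective precedence for the router selector can be summarized as follows (registry selector is the same shape; values illustrative):

# Effective precedence after the set_fact change above.
def effective_router_selector(hostvars):
    if "openshift_hosted_router_selector" in hostvars:
        return hostvars["openshift_hosted_router_selector"]    # new-style wins
    if "openshift_router_selector" in hostvars:
        return hostvars["openshift_router_selector"]           # deprecated fallback
    return hostvars.get("openshift_hosted_infra_selector")     # infra default

assert effective_router_selector({"openshift_router_selector": "region=infra"}) == "region=infra"
assert effective_router_selector({"openshift_hosted_infra_selector": "node-role=infra"}) == "node-role=infra"
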
a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index eac086e81..330e7e59a 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -45,6 +45,21 @@ def map_from_pairs(source, delim="="): return dict(item.split(delim) for item in source.split(",")) +def serviceaccount_name(qualified_sa): + ''' Returns the simple name from a fully qualified name ''' + return qualified_sa.split(":")[-1] + + +def serviceaccount_namespace(qualified_sa, default=None): + ''' Returns the namespace from a fully qualified name ''' + seg = qualified_sa.split(":") + if len(seg) > 1: + return seg[-2] + if default: + return default + return seg[-1] + + # pylint: disable=too-few-public-methods class FilterModule(object): ''' OpenShift Logging Filters ''' @@ -56,5 +71,7 @@ class FilterModule(object): 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, 'map_from_pairs': map_from_pairs, - 'es_storage': es_storage + 'es_storage': es_storage, + 'serviceaccount_name': serviceaccount_name, + 'serviceaccount_namespace': serviceaccount_namespace } diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index 35accfb78..f10df8da5 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -171,22 +171,25 @@ class OpenshiftLoggingFacts(OCBaseCommand): if comp is not None: spec = dc_item["spec"]["template"]["spec"] facts = dict( + name=name, selector=dc_item["spec"]["selector"], replicas=dc_item["spec"]["replicas"], serviceAccount=spec["serviceAccount"], containers=dict(), volumes=dict() ) + if "nodeSelector" in spec: + facts["nodeSelector"] = spec["nodeSelector"] + if "supplementalGroups" in spec["securityContext"]: + facts["storageGroups"] = spec["securityContext"]["supplementalGroups"] + facts["spec"] = spec if "volumes" in spec: for vol in spec["volumes"]: clone = copy.deepcopy(vol) clone.pop("name", None) facts["volumes"][vol["name"]] = clone for container in spec["containers"]: - facts["containers"][container["name"]] = dict( - image=container["image"], - resources=container["resources"], - ) + facts["containers"][container["name"]] = container self.add_facts_for(comp, "deploymentconfigs", name, facts) def facts_for_services(self, namespace): diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 3040d15ca..ffed956a4 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -92,6 +92,7 @@ with_items: - rolebinding-reader - daemonset-admin + - prometheus-metrics-viewer # delete our configmaps - name: delete configmaps diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 9c8f0986a..f526fd734 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -139,10 +139,10 @@ # TODO: make idempotent - name: Generate proxy session - set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}} + set_fact: session_secret={{ 200 | oo_random_word}} check_mode: no # TODO: make idempotent - name: Generate oauth client secret - set_fact: 
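
The two new filters added above split a fully qualified service account reference of the form system:serviceaccount:<namespace>:<name>. Their behaviour, shown as plain functions with a usage example:

# Same parsing the new serviceaccount_name/serviceaccount_namespace filters do.
def serviceaccount_name(qualified_sa):
    return qualified_sa.split(":")[-1]

def serviceaccount_namespace(qualified_sa, default=None):
    seg = qualified_sa.split(":")
    if len(seg) > 1:
        return seg[-2]
    return default if default else seg[-1]

sa = "system:serviceaccount:prometheus:prometheus"
assert serviceaccount_name(sa) == "prometheus"
assert serviceaccount_namespace(sa) == "prometheus"
assert serviceaccount_namespace("prometheus", default="logging") == "logging"
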
oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}} + set_fact: oauth_secret={{ 64 | oo_random_word}} check_mode: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 2695ef030..21fd79c28 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -69,17 +69,18 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" + openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}" openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" - openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" - openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}" + openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}" + _es_containers: "{{item.0.containers}}" with_together: - - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" - "{{ openshift_logging_facts.elasticsearch.pvcs }}" - "{{ es_indices }}" when: @@ -95,8 +96,6 @@ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" - openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" - openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} @@ -123,7 +122,7 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" + openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}" openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" openshift_logging_elasticsearch_ops_deployment: true openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" @@ -134,16 +133,18 @@ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" - openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}" + openshift_logging_elasticsearch_nodeselector: "{{ 
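
The certificate tasks above now feed a length into the shared oo_random_word filter instead of passing an explicit alphabet inline. An illustrative stand-in for what such a filter produces (this only mirrors the intent; the real filter lives in filter_plugins/oo_filters.py):

# Hypothetical equivalent of `{{ 200 | oo_random_word }}` in plain Python.
import random
import string

def random_word(length):
    alphabet = string.ascii_letters + string.digits
    return "".join(random.SystemRandom().choice(alphabet) for _ in range(length))

session_secret = random_word(200)   # like: session_secret={{ 200 | oo_random_word }}
oauth_secret = random_word(64)      # like: oauth_secret={{ 64 | oo_random_word }}
assert len(session_secret) == 200 and len(oauth_secret) == 64
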
openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}" + openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}" openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}" openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}" openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}" openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}" openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}" + _es_containers: "{{item.0.containers}}" with_together: - - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}" + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}" - "{{ es_ops_indices }}" when: diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 0da9771c7..15f6a23e6 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -36,7 +36,7 @@ - include: delete_logging.yaml when: - - openshift_logging_uninstall_logging | default(false) | bool + - not openshift_logging_install_logging | default(false) | bool - name: Cleaning up local temp dir local_action: file path="{{local_tmp.stdout}}" state=absent diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml index 49e8a18af..f60fa8d7d 100644 --- a/roles/openshift_logging/vars/openshift-enterprise.yml +++ b/roles/openshift_logging/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.6') }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index 75bd479be..554aa5bb2 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -6,7 +6,7 @@ openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_ openshift_logging_elasticsearch_namespace: logging openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}" -openshift_logging_elasticsearch_cpu_limit: 1000m +openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('1000m') }}" openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}" openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}" @@ -33,13 +33,19 @@ openshift_logging_elasticsearch_pvc_size: "" openshift_logging_elasticsearch_pvc_dynamic: false openshift_logging_elasticsearch_pvc_pv_selector: {} openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce'] -openshift_logging_elasticsearch_storage_group: '65534' +openshift_logging_elasticsearch_storage_group: ['65534'] openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | 
default('logging-es') }}" # config the es plugin to write kibana index based on the index mode openshift_logging_elasticsearch_kibana_index_mode: 'unique' +openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy" +openshift_logging_elasticsearch_proxy_image_version: "v1.0.0" +openshift_logging_elasticsearch_proxy_cpu_limit: "100m" +openshift_logging_elasticsearch_proxy_memory_limit: "64Mi" +openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus" + # this is used to determine if this is an operations deployment or a non-ops deployment # simply used for naming purposes openshift_logging_elasticsearch_ops_deployment: false diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 1e800b1d6..df2c17aa0 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -37,6 +37,7 @@ # we want to make sure we have all the necessary components here # service account + - name: Create ES service account oc_serviceaccount: state: present @@ -77,6 +78,38 @@ resource_name: rolebinding-reader user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch" +- oc_adm_policy_user: + state: present + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + resource_kind: cluster-role + resource_name: system:auth-delegator + user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace}}:aggregated-logging-elasticsearch" + +# logging-metrics-reader role +- template: + src: logging-metrics-role.j2 + dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml" + vars: + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + role_namespace: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_namespace(openshift_logging_elasticsearch_namespace) }}" + role_user: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_name }}" + +- name: Create logging-metrics-reader-role + command: > + {{ openshift.common.client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + -n "{{ openshift_logging_elasticsearch_namespace }}" + create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml" + register: prometheus_out + check_mode: no + ignore_errors: yes + +- fail: + msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}" + when: + - "prometheus_out.stderr | length > 0" + - "'already exists' not in prometheus_out.stderr" + # View role and binding - name: Generate logging-elasticsearch-view-role template: @@ -206,6 +239,32 @@ - port: 9200 targetPort: "restapi" +- name: Set logging-{{ es_component}}-prometheus service + oc_service: + state: present + name: "logging-{{es_component}}-prometheus" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + labels: + logging-infra: 'support' + ports: + - name: proxy + port: 443 + targetPort: 4443 + selector: + component: "{{ es_component }}-prometheus" + provider: openshift + +- oc_edit: + kind: service + name: "logging-{{es_component}}-prometheus" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + separator: '#' + content: + metadata#annotations#service.alpha.openshift.io/serving-cert-secret-name: "prometheus-tls" + metadata#annotations#prometheus.io/scrape: "true" + metadata#annotations#prometheus.io/scheme: "https" + metadata#annotations#prometheus.io/path: 
"_prometheus/metrics" + - name: Check to see if PVC already exists oc_obj: state: list @@ -260,7 +319,7 @@ delete_after: true - set_fact: - es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}" + es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}" when: openshift_logging_elasticsearch_deployment_name == "" - set_fact: @@ -281,6 +340,8 @@ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}" es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}" es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" + es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}" + es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}" deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" es_replicas: 1 diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 3c8f390c4..1ed886627 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -29,7 +29,9 @@ spec: serviceAccountName: aggregated-logging-elasticsearch securityContext: supplementalGroups: - - {{openshift_logging_elasticsearch_storage_group}} +{% for group in es_storage_groups %} + - {{group}} +{% endfor %} {% if es_node_selector is iterable and es_node_selector | length > 0 %} nodeSelector: {% for key, value in es_node_selector.iteritems() %} @@ -37,6 +39,40 @@ spec: {% endfor %} {% endif %} containers: + - name: proxy + image: {{openshift_logging_elasticsearch_proxy_image_prefix}}:{{openshift_logging_elasticsearch_proxy_image_version}} + imagePullPolicy: Always + args: + - --upstream-ca=/etc/elasticsearch/secret/admin-ca + - --https-address=:4443 + - -provider=openshift + - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret={{ 16 | oo_random_word | b64encode }} + - -upstream=https://localhost:9200 + - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' + - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - -pass-access-token + - -pass-user-headers + ports: + - containerPort: 4443 + name: proxy + protocol: TCP + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + readOnly: true + - mountPath: /etc/elasticsearch/secret + name: elasticsearch + readOnly: true + resources: + limits: + cpu: "{{openshift_logging_elasticsearch_proxy_cpu_limit }}" + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + requests: + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" - name: "elasticsearch" image: {{image}} @@ -49,6 +85,9 @@ spec: {% endif %} requests: memory: "{{es_memory_limit}}" +{% if es_container_security_context %} + securityContext: {{ es_container_security_context | to_yaml }} +{% endif %} ports: - 
containerPort: 9200 @@ -94,7 +133,7 @@ spec: value: "30" - name: "POD_LABEL" - value: "component={{component}}" + value: "component={{component}}" - name: "IS_MASTER" value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" @@ -102,6 +141,9 @@ spec: - name: "HAS_DATA" value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}" + - + name: "PROMETHEUS_USER" + value: "{{openshift_logging_elasticsearch_prometheus_sa}}" volumeMounts: - name: elasticsearch @@ -120,6 +162,9 @@ spec: timeoutSeconds: 30 periodSeconds: 5 volumes: + - name: proxy-tls + secret: + secretName: prometheus-tls - name: elasticsearch secret: secretName: logging-elasticsearch diff --git a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 new file mode 100644 index 000000000..d9800e5a5 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: List +items: +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: prometheus-metrics-viewer + namespace: {{ namespace }} + rules: + - apiGroups: + - metrics.openshift.io + resources: + - prometheus + verbs: + - view +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + name: prometheus-metrics-viewer + namespace: {{ namespace }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-metrics-viewer + subjects: + - kind: ServiceAccount + namespace: {{ role_namespace }} + name: {{ role_user }} diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 084b734ee..8da74430f 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -1,7 +1,6 @@ --- openshift_metrics_start_cluster: True openshift_metrics_install_metrics: False -openshift_metrics_uninstall_metrics: False openshift_metrics_startup_timeout: 500 openshift_metrics_hawkular_replicas: 1 diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index c92458c50..10509fc1e 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -49,7 +49,7 @@ - include: uninstall_metrics.yaml when: - - openshift_metrics_uninstall_metrics | bool + - not openshift_metrics_install_metrics | bool - include: uninstall_hosa.yaml when: not openshift_metrics_install_hawkular_agent | bool diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index 68cdf06fe..5a1728de5 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/" -__openshift_metrics_image_version: "v3.6" +__openshift_metrics_image_version: "v3.7" diff --git a/roles/openshift_named_certificates/tasks/named_certificates.yml b/roles/openshift_named_certificates/tasks/named_certificates.yml deleted file mode 100644 index 7b097b443..000000000 --- a/roles/openshift_named_certificates/tasks/named_certificates.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: Clear named certificates - file: - path: "{{ named_certs_dir }}" - state: absent - when: overwrite_named_certs | bool - -- name: Ensure named certificate 
directory exists - file: - path: "{{ named_certs_dir }}" - state: directory - mode: 0700 - -- name: Land named certificates - copy: - src: "{{ item.certfile }}" - dest: "{{ named_certs_dir }}" - with_items: "{{ openshift_master_named_certificates | default([]) }}" - -- name: Land named certificate keys - copy: - src: "{{ item.keyfile }}" - dest: "{{ named_certs_dir }}" - mode: 0600 - with_items: "{{ openshift_master_named_certificates | default([]) }}" - -- name: Land named CA certificates - copy: - src: "{{ item }}" - dest: "{{ named_certs_dir }}" - mode: 0600 - with_items: "{{ openshift_master_named_certificates | default([]) | oo_collect('cafile') }}" diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 2759188f3..e3898b520 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -46,6 +46,22 @@ notify: - restart node +- name: Configure AWS Cloud Provider Settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + create: true + with_items: + - regex: '^AWS_ACCESS_KEY_ID=' + line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" + - regex: '^AWS_SECRET_ACCESS_KEY=' + line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" + no_log: True + when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined + notify: + - restart node + # Necessary because when you're on a node that's also a master the master will be # restarted after the node restarts docker and it will take up to 60 seconds for # systemd to start the master again diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e82fb42b8..59b8bb76e 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -76,22 +76,6 @@ include: config.yml when: not openshift_node_bootstrap -- name: Configure AWS Cloud Provider Settings - lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node - regexp: "{{ item.regex }}" - line: "{{ item.line }}" - create: true - with_items: - - regex: '^AWS_ACCESS_KEY_ID=' - line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" - - regex: '^AWS_SECRET_ACCESS_KEY=' - line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" - no_log: True - when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined - notify: - - restart node - #### Storage class plugins here #### - name: NFS storage plugin configuration include: storage_plugins/nfs.yml diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py deleted file mode 100644 index d42c9bdb9..000000000 --- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py +++ /dev/null @@ -1,25 +0,0 @@ -''' - Openshift Logging class that provides useful filters used in Logging. 
- - This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml -''' - - -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - -# pylint: disable=too-few-public-methods -class FilterModule(object): - ''' OpenShift Logging Filters ''' - - # pylint: disable=no-self-use, too-few-public-methods - def filters(self): - ''' Returns the names of the filters provided by this class ''' - return { - 'map_from_pairs': map_from_pairs - } diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py new file mode 100644 index 000000000..72c47b8ee --- /dev/null +++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py @@ -0,0 +1,44 @@ +''' + Openshift Sanitize inventory class that provides useful filters used in Logging. +''' + + +import re + + +# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml +def map_from_pairs(source, delim="="): + ''' Returns a dict given the source and delim delimited ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + +def vars_with_pattern(source, pattern=""): + ''' Returns a list of variables whose name matches the given pattern ''' + if source == '': + return list() + + var_list = list() + + var_pattern = re.compile(pattern) + + for item in source: + if var_pattern.match(item): + var_list.append(item) + + return var_list + + +# pylint: disable=too-few-public-methods +class FilterModule(object): + ''' OpenShift Logging Filters ''' + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + ''' Returns the names of the filters provided by this class ''' + return { + 'map_from_pairs': map_from_pairs, + 'vars_with_pattern': vars_with_pattern + } diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml index 24e44ea85..39bf1780a 100644 --- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml +++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml @@ -10,3 +10,25 @@ Starting in 3.6 openshift_use_dnsmasq must be true or critical features will not function. This also means that NetworkManager must be installed enabled and responsible for management of the primary interface. + +- set_fact: + __using_dynamic: True + when: + - hostvars[inventory_hostname][item] in ['dynamic'] + with_items: + - "{{ hostvars[inventory_hostname] | vars_with_pattern(pattern='openshift_.*_storage_kind') }}" + +- name: Ensure that dynamic provisioning is set if using dynamic storage + when: + - dynamic_volumes_check | default(true) | bool + - not openshift_master_dynamic_provisioning_enabled | default(false) | bool + - not openshift_cloudprovider_kind is defined + - __using_dynamic is defined and __using_dynamic | bool + fail: + msg: |- + Using a storage kind of 'dynamic' without enabling dynamic provisioning nor + setting a cloud provider will cause generated PVCs to not be able to bind as + intended. Either update to not use a dynamic storage or set + openshift_master_dynamic_provisioning_enabled to True and set an + openshift_cloudprovider_kind. You can disable this check with + 'dynamic_volumes_check=False'. 
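The new vars_with_pattern filter above is what lets openshift_sanitize_inventory/tasks/unsupported.yml gather every openshift_*_storage_kind inventory variable and then refuse a storage kind of 'dynamic' when neither dynamic provisioning nor a cloud provider is configured. The sketch below is only illustrative: the vars_with_pattern body is copied from the filter plugin added in this patch, while the sample hostvars values and the SystemExit message are assumptions used to show how the check behaves outside of Ansible.

import re

# Filter copied from the new openshift_sanitize_inventory filter plugin:
# returns the names in `source` that match the given regex pattern.
def vars_with_pattern(source, pattern=""):
    if source == '':
        return list()
    var_list = list()
    var_pattern = re.compile(pattern)
    for item in source:
        if var_pattern.match(item):
            var_list.append(item)
    return var_list

# Hypothetical inventory variables, standing in for hostvars[inventory_hostname].
hostvars = {
    'openshift_hosted_registry_storage_kind': 'dynamic',
    'openshift_metrics_storage_kind': 'nfs',
    'openshift_master_dynamic_provisioning_enabled': False,
}

# Mirror of the set_fact task: any *_storage_kind variable whose value is
# 'dynamic' marks the inventory as using dynamic storage.
storage_kind_vars = vars_with_pattern(hostvars, pattern='openshift_.*_storage_kind')
using_dynamic = any(hostvars[name] == 'dynamic' for name in storage_kind_vars)

# Mirror of the fail task: dynamic storage is only accepted when dynamic
# provisioning is enabled or a cloud provider kind is set; the check can be
# skipped with dynamic_volumes_check=False.
dynamic_volumes_check = True
if (dynamic_volumes_check and using_dynamic
        and not hostvars.get('openshift_master_dynamic_provisioning_enabled', False)
        and 'openshift_cloudprovider_kind' not in hostvars):
    raise SystemExit("storage kind 'dynamic' requires dynamic provisioning or a cloud provider")

With the sample values above the sketch trips the check, mirroring the fail task in unsupported.yml; setting dynamic_volumes_check to False skips it, just as 'dynamic_volumes_check=False' does in the inventory.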
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 54a6dd7c3..074904bec 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@ --- - name: Create heketi DB volume - command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json" + command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json" register: setup_storage - name: Copy heketi-storage list |