Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_service_broker/defaults/main.yml | 1
-rw-r--r--  roles/ansible_service_broker/tasks/main.yml | 4
-rw-r--r--  roles/calico/tasks/main.yml | 22
-rw-r--r--  roles/cockpit-ui/defaults/main.yml | 3
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 4
-rw-r--r--  roles/docker/defaults/main.yml | 21
-rw-r--r--  roles/docker/tasks/main.yml | 1
-rw-r--r--  roles/docker/tasks/package_docker.yml | 40
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 44
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 6
-rw-r--r--  roles/docker/templates/registries.conf | 46
-rw-r--r--  roles/etcd/defaults/main.yaml | 78
-rwxr-xr-x  roles/etcd/library/delegated_serial_command.py (renamed from roles/etcd_common/library/delegated_serial_command.py) | 0
-rw-r--r--  roles/etcd/meta/main.yml | 3
-rw-r--r--  roles/etcd/tasks/auxiliary/clean_data.yml (renamed from roles/etcd_migrate/tasks/clean_data.yml) | 0
-rw-r--r--  roles/etcd/tasks/auxiliary/drop_etcdctl.yml (renamed from roles/etcd_common/tasks/drop_etcdctl.yml) | 2
-rw-r--r--  roles/etcd/tasks/backup.yml | 2
-rw-r--r--  roles/etcd/tasks/backup/backup.yml (renamed from roles/etcd_common/tasks/backup.yml) | 5
-rw-r--r--  roles/etcd/tasks/backup_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_generated_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_server_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/ca.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/backup_ca_certificates.yml | 12
-rw-r--r--  roles/etcd/tasks/certificates/backup_generated_certificates.yml | 13
-rw-r--r--  roles/etcd/tasks/certificates/backup_server_certificates.yml | 11
-rw-r--r--  roles/etcd/tasks/certificates/deploy_ca.yml (renamed from roles/etcd_ca/tasks/main.yml) | 4
-rw-r--r--  roles/etcd/tasks/certificates/distribute_ca.yml | 47
-rw-r--r--  roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml (renamed from roles/etcd_client_certificates/tasks/main.yml) | 2
-rw-r--r--  roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml (renamed from roles/etcd_server_certificates/tasks/main.yml) | 4
-rw-r--r--  roles/etcd/tasks/certificates/remove_ca_certificates.yml | 5
-rw-r--r--  roles/etcd/tasks/certificates/remove_generated_certificates.yml | 5
-rw-r--r--  roles/etcd/tasks/certificates/retrieve_ca_certificates.yml | 8
-rw-r--r--  roles/etcd/tasks/clean_data.yml | 2
-rw-r--r--  roles/etcd/tasks/client_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/distribute_ca | 2
-rw-r--r--  roles/etcd/tasks/drop_etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 5
-rw-r--r--  roles/etcd/tasks/migrate.add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.configure_master.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.pre_check.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.yml | 2
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml (renamed from roles/etcd_migrate/tasks/add_ttls.yml) | 1
-rw-r--r--  roles/etcd/tasks/migration/check.yml (renamed from roles/etcd_migrate/tasks/check.yml) | 0
-rw-r--r--  roles/etcd/tasks/migration/check_cluster_health.yml (renamed from roles/etcd_migrate/tasks/check_cluster_health.yml) | 0
-rw-r--r--  roles/etcd/tasks/migration/check_cluster_status.yml (renamed from roles/etcd_migrate/tasks/check_cluster_status.yml) | 0
-rw-r--r--  roles/etcd/tasks/migration/configure_master.yml (renamed from roles/etcd_migrate/tasks/configure.yml) | 0
-rw-r--r--  roles/etcd/tasks/migration/migrate.yml (renamed from roles/etcd_migrate/tasks/migrate.yml) | 0
-rw-r--r--  roles/etcd/tasks/remove_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/remove_generated_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/retrieve_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/server_certificates.yml | 6
-rw-r--r--  roles/etcd/tasks/upgrade/upgrade_image.yml (renamed from roles/etcd_upgrade/tasks/upgrade_image.yml) | 14
-rw-r--r--  roles/etcd/tasks/upgrade/upgrade_rpm.yml (renamed from roles/etcd_upgrade/tasks/upgrade_rpm.yml) | 5
-rw-r--r--  roles/etcd/tasks/upgrade_image.yml | 2
-rw-r--r--  roles/etcd/tasks/upgrade_rpm.yml | 2
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 1
-rw-r--r--  roles/etcd/templates/etcdctl.sh.j2 (renamed from roles/etcd_common/templates/etcdctl.sh.j2) | 0
-rw-r--r--  roles/etcd/templates/openssl_append.j2 (renamed from roles/etcd_ca/templates/openssl_append.j2) | 0
-rw-r--r--  roles/etcd_ca/README.md | 34
-rw-r--r--  roles/etcd_client_certificates/README.md | 34
-rw-r--r--  roles/etcd_client_certificates/meta/main.yml | 16
-rw-r--r--  roles/etcd_common/README.md | 53
-rw-r--r--  roles/etcd_common/defaults/main.yml | 75
-rw-r--r--  roles/etcd_common/meta/main.yml | 15
-rw-r--r--  roles/etcd_common/tasks/main.yml | 9
-rw-r--r--  roles/etcd_common/tasks/noop.yml | 4
-rw-r--r--  roles/etcd_common/vars/main.yml | 4
-rw-r--r--  roles/etcd_migrate/README.md | 53
-rw-r--r--  roles/etcd_migrate/defaults/main.yml | 3
-rw-r--r--  roles/etcd_migrate/meta/main.yml | 17
-rw-r--r--  roles/etcd_migrate/tasks/main.yml | 25
-rw-r--r--  roles/etcd_server_certificates/README.md | 34
-rw-r--r--  roles/etcd_server_certificates/meta/main.yml | 17
-rw-r--r--  roles/etcd_upgrade/defaults/main.yml | 3
-rw-r--r--  roles/etcd_upgrade/meta/main.yml | 17
-rw-r--r--  roles/etcd_upgrade/tasks/main.yml | 14
-rw-r--r--  roles/etcd_upgrade/tasks/upgrade.yml | 11
-rw-r--r--  roles/etcd_upgrade/vars/main.yml | 3
-rw-r--r--  roles/flannel/README.md | 2
-rw-r--r--  roles/flannel/meta/main.yml | 5
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/installer_checkpoint/README.md | 177
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 182
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 2
-rw-r--r--  roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py (renamed from roles/lib_openshift/src/test/integration/filter_plugins/filters.py) | 0
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_configmap.yml | 4
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_configmap.py | 6
-rw-r--r--  roles/lib_utils/library/repoquery.py | 18
-rw-r--r--  roles/lib_utils/library/yedit.py | 3
-rw-r--r--  roles/lib_utils/src/ansible/repoquery.py | 17
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 2
-rw-r--r--  roles/lib_utils/src/lib/import.py | 1
-rw-r--r--  roles/nuage_master/meta/main.yml | 3
-rwxr-xr-x  roles/nuage_master/templates/nuage-master-config-daemonset.j2 | 6
-rwxr-xr-x  roles/nuage_master/templates/nuage-node-config-daemonset.j2 | 15
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 2
-rw-r--r--  roles/openshift_aws/filter_plugins/openshift_aws_filters.py (renamed from roles/openshift_aws/filter_plugins/filters.py) | 0
-rw-r--r--  roles/openshift_aws/tasks/build_ami.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/elb.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/iam_cert.yml | 10
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 4
-rw-r--r--  roles/openshift_ca/defaults/main.yml | 8
-rw-r--r--  roles/openshift_ca/meta/main.yml | 1
-rw-r--r--  roles/openshift_ca/vars/main.yml | 7
-rw-r--r--  roles/openshift_default_storage_class/README.md | 2
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 9
-rw-r--r--  roles/openshift_etcd_ca/meta/main.yml | 18
-rw-r--r--  roles/openshift_etcd_client_certificates/meta/main.yml | 4
-rw-r--r--  roles/openshift_etcd_client_certificates/tasks/main.yml | 4
-rw-r--r--  roles/openshift_etcd_server_certificates/meta/main.yml | 16
-rw-r--r--  roles/openshift_examples/README.md | 14
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 209
-rw-r--r--  roles/openshift_gcp/tasks/main.yaml | 43
-rw-r--r--  roles/openshift_gcp/templates/dns.j2.sh | 13
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 318
-rw-r--r--  roles/openshift_gcp/templates/remove.j2.sh | 156
-rw-r--r--  roles/openshift_gcp_image_prep/files/partition.conf | 3
-rw-r--r--  roles/openshift_gcp_image_prep/tasks/main.yaml | 18
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 160
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 16
-rw-r--r--  roles/openshift_health_checker/library/ocutil.py | 11
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 151
-rw-r--r--  roles/openshift_health_checker/openshift_checks/diagnostics.py | 62
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 9
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 91
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_volume.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 12
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 2
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 134
-rw-r--r--  roles/openshift_health_checker/test/diagnostics_test.py | 50
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 13
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 192
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py | 18
-rw-r--r--  roles/openshift_health_checker/test/logging_check_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/logging_index_time_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/openshift_check_test.py | 43
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_update_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/zz_failure_summary_test.py | 15
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 74
-rw-r--r--  roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py (renamed from roles/openshift_hosted/filter_plugins/filters.py) | 0
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 1
-rw-r--r--  roles/openshift_hosted/tasks/create_projects.yml | 14
-rw-r--r--  roles/openshift_hosted/tasks/firewall.yml (renamed from roles/openshift_hosted/tasks/router/firewall.yml) | 10
-rw-r--r--  roles/openshift_hosted/tasks/main.yml | 20
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml (renamed from roles/openshift_hosted/tasks/registry/registry.yml) | 65
-rw-r--r--  roles/openshift_hosted/tasks/registry/firewall.yml | 40
-rw-r--r--  roles/openshift_hosted/tasks/router.yml (renamed from roles/openshift_hosted/tasks/router/router.yml) | 54
-rw-r--r--  roles/openshift_hosted/tasks/secure.yml (renamed from roles/openshift_hosted/tasks/registry/secure.yml) | 6
-rw-r--r--  roles/openshift_hosted/tasks/secure/passthrough.yml (renamed from roles/openshift_hosted/tasks/registry/secure/passthrough.yml) | 0
-rw-r--r--  roles/openshift_hosted/tasks/secure/reencrypt.yml (renamed from roles/openshift_hosted/tasks/registry/secure/reencrypt.yml) | 0
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml (renamed from roles/openshift_hosted/tasks/registry/storage/glusterfs.yml) | 0
-rw-r--r--  roles/openshift_hosted/tasks/storage/object_storage.yml (renamed from roles/openshift_hosted/tasks/registry/storage/object_storage.yml) | 0
l---------  roles/openshift_hosted/tasks/storage/registry_config.j2 (renamed from roles/openshift_hosted/tasks/registry/storage/registry_config.j2) | 0
-rw-r--r--  roles/openshift_hosted/tasks/storage/s3.yml (renamed from roles/openshift_hosted/tasks/registry/storage/s3.yml) | 2
-rw-r--r--  roles/openshift_hosted/tasks/wait_for_pod.yml | 36
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2 | 4
-rw-r--r--  roles/openshift_hosted/vars/main.yml | 11
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 3
-rw-r--r--  roles/openshift_hosted_logging/README.md | 40
-rw-r--r--  roles/openshift_hosted_logging/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted_logging/handlers/main.yml | 21
-rw-r--r--  roles/openshift_hosted_logging/meta/main.yaml | 3
-rw-r--r--  roles/openshift_hosted_logging/tasks/cleanup_logging.yaml | 59
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 177
-rw-r--r--  roles/openshift_hosted_logging/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_hosted_logging/tasks/update_master_config.yaml | 7
-rw-r--r--  roles/openshift_hosted_logging/vars/main.yaml | 32
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_logging/README.md | 36
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 67
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 10
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 9
-rw-r--r--  roles/openshift_logging_eventrouter/README.md | 20
-rw-r--r--  roles/openshift_logging_eventrouter/defaults/main.yaml | 9
-rw-r--r--  roles/openshift_logging_eventrouter/files/eventrouter-template.yaml | 103
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml | 40
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml | 59
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/main.yaml | 6
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 109
-rw-r--r--  roles/openshift_logging_eventrouter/vars/main.yaml | 2
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluent.conf.j2 | 2
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 52
-rw-r--r--  roles/openshift_logging_mux/files/fluent.conf | 2
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 46
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 3
-rw-r--r--  roles/openshift_master/README.md | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 6
-rw-r--r--  roles/openshift_master/meta/main.yml | 16
-rw-r--r--  roles/openshift_master/tasks/main.yml | 40
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 35
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_master/tasks/update_etcd_client_urls.yml | 8
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 12
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 12
-rw-r--r--  roles/openshift_master/vars/main.yml | 19
-rw-r--r--  roles/openshift_master_certificates/meta/main.yml | 4
-rw-r--r--  roles/openshift_master_facts/defaults/main.yml | 2
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 30
l---------  roles/openshift_master_facts/lookup_plugins/oo_option.py | 1
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py | 17
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py | 16
-rw-r--r--  roles/openshift_metrics/README.md | 2
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 17
-rw-r--r--  roles/openshift_metrics/tasks/install_hawkular.yaml | 1
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_metrics/tasks/pre_install.yaml | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 3
-rw-r--r--  roles/openshift_metrics/templates/route.j2 | 3
-rw-r--r--  roles/openshift_metrics/vars/default_images.yml | 4
-rw-r--r--  roles/openshift_metrics/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/openshift_named_certificates/defaults/main.yml | 6
-rw-r--r--  roles/openshift_named_certificates/vars/main.yml | 6
-rw-r--r--  roles/openshift_node/README.md | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 13
-rw-r--r--  roles/openshift_node/handlers/main.yml | 3
-rw-r--r--  roles/openshift_node/meta/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config.yml | 16
-rw-r--r--  roles/openshift_node/tasks/install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 33
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 24
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 2
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 18
-rw-r--r--  roles/openshift_node_certificates/meta/main.yml | 3
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 2
-rw-r--r--  roles/openshift_node_dnsmasq/handlers/main.yml | 1
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/no-network-manager.yml | 9
-rw-r--r--  roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py | 32
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 10
-rw-r--r--  roles/openshift_node_upgrade/README.md | 2
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml | 24
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service | 2
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 17
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 3
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 2
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 5
-rw-r--r--  roles/openshift_repos/README.md | 10
-rw-r--r--  roles/openshift_repos/tasks/centos_repos.yml | 25
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 19
-rw-r--r--  roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 (renamed from roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo) | 2
-rw-r--r--  roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 | 27
-rw-r--r--  roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 | 27
-rw-r--r--  roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 | 27
-rw-r--r--  roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py | 25
-rw-r--r--  roles/openshift_sanitize_inventory/library/conditional_set_fact.py | 68
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml | 48
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml | 17
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/deprecations.yml | 21
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 4
-rw-r--r--  roles/openshift_sanitize_inventory/vars/main.yml | 81
-rw-r--r--  roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js | 2
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 31
-rw-r--r--  roles/openshift_service_catalog/tasks/wire_aggregator.yml | 198
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 5
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml | 143
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml | 134
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 12
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 | 36
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 | 49
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 6
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 6
-rw-r--r--  roles/openshift_version/defaults/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 15
-rw-r--r--  roles/openshift_version/tasks/set_version_containerized.yml | 11
-rw-r--r--  roles/os_firewall/tasks/iptables.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 13
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 18
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 4
-rw-r--r--  roles/template_service_broker/files/openshift-ansible-catalog-console.js | 1
-rw-r--r--  roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js | 2
-rw-r--r--  roles/template_service_broker/meta/main.yml (renamed from roles/etcd_ca/meta/main.yml) | 7
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 77
-rw-r--r--  roles/template_service_broker/tasks/main.yml | 8
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 35
-rw-r--r--  roles/template_service_broker/vars/default_images.yml | 4
-rw-r--r--  roles/template_service_broker/vars/main.yml | 7
-rw-r--r--  roles/template_service_broker/vars/openshift-enterprise.yml | 4
340 files changed, 4524 insertions, 2044 deletions
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index 12929b354..9eb9db316 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -1,6 +1,7 @@
---
ansible_service_broker_remove: false
+ansible_service_broker_install: false
ansible_service_broker_log_level: info
ansible_service_broker_output_request: false
ansible_service_broker_recovery: true
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
index b46ce8233..d8695bd3a 100644
--- a/roles/ansible_service_broker/tasks/main.yml
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
# do any asserts here
- include: install.yml
- when: not ansible_service_broker_remove|default(false) | bool
+ when: ansible_service_broker_install | default(false) | bool
- include: remove.yml
- when: ansible_service_broker_remove|default(false) | bool
+ when: ansible_service_broker_remove | default(false) | bool
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 39f730462..0e3863304 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -2,10 +2,14 @@
- name: Calico Node | Error if invalid cert arguments
fail:
msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints"
- when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
+ when:
+ - calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined
+ - not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
- name: Calico Node | Generate OpenShift-etcd certs
- include: ../../../roles/etcd_client_certificates/tasks/main.yml
+ include_role:
+ name: etcd
+ tasks_from: client_certificates
when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
vars:
etcd_cert_prefix: calico.etcd-
@@ -28,18 +32,18 @@
msg: "Invalid etcd configuration for calico."
when: item is not defined or item == ''
with_items:
- - calico_etcd_ca_cert_file
- - calico_etcd_cert_file
- - calico_etcd_key_file
- - calico_etcd_endpoints
+ - calico_etcd_ca_cert_file
+ - calico_etcd_cert_file
+ - calico_etcd_key_file
+ - calico_etcd_endpoints
- name: Calico Node | Assure the calico certs are present
stat:
path: "{{ item }}"
with_items:
- - "{{ calico_etcd_ca_cert_file }}"
- - "{{ calico_etcd_cert_file }}"
- - "{{ calico_etcd_key_file }}"
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file }}"
+ - "{{ calico_etcd_key_file }}"
- name: Calico Node | Configure Calico service unit file
template:
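
The switch above from a relative cross-role file include to include_role with tasks_from is the pattern this commit standardizes on: the consolidated etcd role exposes thin top-level task files (client_certificates.yml, ca.yml, backup.yml, and so on) as selectable entrypoints. A minimal sketch of a consumer pulling a different entrypoint the same way (the playbook context and tag values are illustrative, not from this commit):

    - include_role:
        name: etcd
        tasks_from: backup
      vars:
        r_etcd_common_backup_tag: etcd_backup_tag
        r_etcd_common_backup_sufix_name: pre-upgrade
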
diff --git a/roles/cockpit-ui/defaults/main.yml b/roles/cockpit-ui/defaults/main.yml
new file mode 100644
index 000000000..b1696f1b8
--- /dev/null
+++ b/roles/cockpit-ui/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_config_base: "/etc/origin"
+openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master"
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 0114498f8..244e2cc41 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -50,7 +50,9 @@
-n default
register: deploy_registry_console
changed_when: "'already exists' not in deploy_registry_console.stderr"
- failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
+ failed_when:
+ - "'already exists' not in deploy_registry_console.stderr"
+ - "deploy_registry_console.rc != 0"
- name: Delete temp directory
file:
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index ed97d539c..e36dfa7b9 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1 +1,22 @@
---
+docker_cli_auth_config_path: '/root/.docker'
+
+# oreg_url is defined by user input.
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+oreg_auth_credentials_replace: False
+
+openshift_docker_additional_registries: []
+openshift_docker_blocked_registries: []
+openshift_docker_insecure_registries: []
+
+openshift_docker_ent_reg: 'registry.access.redhat.com'
+
+# The l2_docker_* variables convert csv strings to lists, if
+# necessary. These variables should be used in place of their respective
+# openshift_docker_* counterparts to ensure the properly formatted lists are
+# utilized.
+l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}"
+l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
+l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+
+containers_registries_conf_path: /etc/containers/registries.conf
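
The l2_docker_* expressions above normalize whatever shape the inventory supplies (empty string, single string, comma-separated string, or an actual list) into a proper list, and oreg_host keeps only the host portion of oreg_url when its first path segment looks like a hostname. A sketch of the values these expressions resolve to, with illustrative inputs:

    # openshift_docker_insecure_registries: ''                  -> []
    # openshift_docker_insecure_registries: 'reg1.example.com'  -> ['reg1.example.com']
    # openshift_docker_insecure_registries: 'reg1,reg2'         -> ['reg1', 'reg2']
    # openshift_docker_insecure_registries: ['reg1', 'reg2']    -> ['reg1', 'reg2']  (passed through)
    #
    # oreg_url: 'registry.example.com/openshift3/ose' -> oreg_host: 'registry.example.com'
    # oreg_url: 'openshift3/ose'  (no dot in the first segment) -> oreg_host: ''
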
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 78c6671d8..7ece0e061 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -26,3 +26,4 @@
include: systemcontainer_crio.yml
when:
- l_use_crio
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index bc52ab60c..3e81d5c8e 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -3,6 +3,8 @@
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Error out if Docker pre-installed but too old
@@ -48,25 +50,33 @@
src: custom.conf.j2
when: not os_firewall_use_firewalld | default(False) | bool
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+
- stat: path=/etc/sysconfig/docker
register: docker_check
-- name: Set registry params
+- name: Comment old registry params in /etc/sysconfig/docker
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
- when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+ line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}"
with_items:
- reg_conf_var: ADD_REGISTRY
- reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
- reg_flag: --add-registry
- reg_conf_var: BLOCK_REGISTRY
- reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
- reg_flag: --block-registry
- reg_conf_var: INSECURE_REGISTRY
- reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
- reg_flag: --insecure-registry
+ notify:
+ - restart docker
+
+- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
+ template:
+ dest: "{{ containers_registries_conf_path }}"
+ src: registries.conf
notify:
- restart docker
@@ -117,6 +127,18 @@
notify:
- restart docker
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ docker_cli_auth_config_path }}/config.json"
+ when: oreg_auth_user is defined
+ register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+ command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+
- name: Start the Docker service
systemd:
name: docker
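
The registry-auth tasks above run docker login against a non-default config path, so the credentials land in {{ docker_cli_auth_config_path }}/config.json rather than the invoking user's ~/.docker. Assuming standard Docker CLI behavior, the written file would look roughly like this (registry name illustrative; the auth value is base64 of "user:password"):

    # /root/.docker/config.json (sketch)
    # {
    #   "auths": {
    #     "registry.example.com": { "auth": "dXNlcjpwYXNzd29yZA==" }
    #   }
    # }
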
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 24ca0d9f8..66ce475e1 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -1,17 +1,17 @@
---
# TODO: Much of this file is shared with container engine tasks
- set_fact:
- l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}"
- when: openshift.docker.insecure_registries
+ l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
+ when: l2_docker_insecure_registries
- set_fact:
- l_crio_registries: "{{ openshift.docker.additional_registries + ['docker.io'] }}"
- when: openshift.docker.additional_registries
+ l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
+ when: l2_docker_additional_registries
- set_fact:
l_crio_registries: "{{ ['docker.io'] }}"
- when: not openshift.docker.additional_registries
+ when: not l2_docker_additional_registries
- set_fact:
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
- when: openshift.docker.additional_registries
+ when: l2_docker_additional_registries
- name: Ensure container-selinux is installed
package:
@@ -95,7 +95,7 @@
- name: Set to default prepend
set_fact:
l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "crio-o-fedora"
+ l_crio_image_name: "cri-o-fedora"
- name: Use Centos based image when distribution is CentOS
set_fact:
@@ -104,22 +104,26 @@
- name: Use RHEL based image when distribution is Red Hat
set_fact:
- l_crio_image_prepend: "registry.access.redhat.com"
+ l_crio_image_prepend: "registry.access.redhat.com/openshift3"
l_crio_image_name: "cri-o"
when: ansible_distribution == "RedHat"
- # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- - name: Use a testing registry if requested
- set_fact:
- l_crio_image_prepend: "{{ openshift_crio_systemcontainer_image_registry_override }}"
- when:
- - openshift_crio_systemcontainer_image_registry_override is defined
- - openshift_crio_systemcontainer_image_registry_override != ""
-
- name: Set the full image name
set_fact:
l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest"
+ # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
+ - name: Use a specific image if requested
+ set_fact:
+ l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
+ when:
+ - openshift_crio_systemcontainer_image_override is defined
+ - openshift_crio_systemcontainer_image_override != ""
+
+ # Be nice and let the user see the variable result
+ - debug:
+ var: l_crio_image
+
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull CRI-O System Container image
command: "atomic pull --storage ostree {{ l_crio_image }}"
@@ -134,6 +138,14 @@
image: "{{ l_crio_image }}"
state: latest
+- name: Remove CRI-o default configuration files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/cni/net.d/200-loopback.conf
+ - /etc/cni/net.d/100-crio-bridge.conf
+
- name: Create the CRI-O configuration
template:
dest: /etc/crio/crio.conf
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 146e5f430..8b43393cb 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -148,10 +148,10 @@
# Set local versions of facts that must be in json format for container-daemon.json
# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
- set_fact:
- l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
+ l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}"
l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
- l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}"
- l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
+ l_docker_additional_registries: "{{ l2_docker_additional_registries | default([]) | to_json }}"
+ l_docker_blocked_registries: "{{ l2_docker_blocked_registries | default([]) | to_json }}"
l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
# Configure container-engine using the container-daemon.json file
diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf
new file mode 100644
index 000000000..d379b2be0
--- /dev/null
+++ b/roles/docker/templates/registries.conf
@@ -0,0 +1,46 @@
+# {{ ansible_managed }}
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to YAML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries', 'insecure_registries',
+# and 'block_registries'.
+
+
+#registries:
+# - registry.access.redhat.com
+
+{% if l2_docker_additional_registries %}
+registries:
+{% for reg in l2_docker_additional_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to access insecure registries, uncomment the section below
+# and add the registries fully-qualified name. An insecure registry is one
+# that does not have a valid SSL certificate or only does HTTP.
+#insecure_registries:
+# -
+
+{% if l2_docker_insecure_registries %}
+insecure_registries:
+{% for reg in l2_docker_insecure_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registries fully-qualified name.
+#block_registries:
+# -
+
+{% if l2_docker_blocked_registries %}
+block_registries:
+{% for reg in l2_docker_blocked_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
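
For reference, a rough rendering of this template with the illustrative values l2_docker_additional_registries: ['registry.access.redhat.com'] and l2_docker_insecure_registries: ['172.30.0.0/16'], and no blocked registries (so that section stays commented out):

    registries:
      - registry.access.redhat.com

    insecure_registries:
      - 172.30.0.0/16
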
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 3cc2bbb18..18164050a 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,6 +1,66 @@
---
-r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(Falsel) }}"
+r_etcd_common_backup_tag: ''
+r_etcd_common_backup_sufix_name: ''
+
+# runc, docker, host
+r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_embedded_etcd: false
+
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
+
+# etcd server vars
+etcd_conf_dir: '/etc/etcd'
+r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
+etcd_system_container_conf_dir: /var/lib/etcd/etc
+etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
+etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
+etcd_key_file: "{{ etcd_conf_dir }}/server.key"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
+
+# etcd ca vars
+etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
+etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
+etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
+etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
+etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
+etcd_ca_name: etcd_ca
+etcd_req_ext: etcd_v3_req
+etcd_ca_exts_peer: etcd_v3_ca_peer
+etcd_ca_exts_server: etcd_v3_ca_server
+etcd_ca_exts_self: etcd_v3_ca_self
+etcd_ca_exts_client: etcd_v3_ca_client
+etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
+etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
+etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
+etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
+etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
+etcd_ca_default_days: 1825
+
+r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt
+r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key
+r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt
+
+# etcd server & certificate vars
+etcd_hostname: "{{ inventory_hostname }}"
+etcd_ip: "{{ ansible_default_ipv4.address }}"
+etcd_is_atomic: False
+etcd_is_containerized: False
+etcd_is_thirdparty: False
+
+# etcd dir vars
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+
+# etcd ports and protocols
+etcd_client_port: 2379
+etcd_peer_port: 2380
+etcd_url_scheme: http
+etcd_peer_url_scheme: http
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
@@ -10,8 +70,15 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_client_port: 2379
-etcd_peer_port: 2380
+etcd_peer: 127.0.0.1
+etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
+
+etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
+# Location of the service file is fixed and not meant to be changed
+etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
+
+r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
r_etcd_os_firewall_deny: []
@@ -20,3 +87,6 @@ r_etcd_os_firewall_allow:
port: "{{etcd_client_port}}/tcp"
- service: etcd peering
port: "{{ etcd_peer_port }}/tcp"
+
+# set the backend quota to 4GB by default
+etcd_quota_backend_bytes: 4294967296
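
Two of the new defaults are worth unpacking: r_etcd_common_etcdctl_command picks the invocation matching where etcd actually runs, etcd_data_dir follows the same runtime switch, and etcd_quota_backend_bytes is 4 GiB (4 * 1024^3 = 4294967296). The chained ternaries resolve as follows:

    # r_etcd_common_etcd_runtime: host (or embedded) -> etcdctl
    # r_etcd_common_etcd_runtime: docker             -> docker exec etcd_container etcdctl
    # r_etcd_common_etcd_runtime: runc               -> runc exec etcd etcdctl
    #
    # etcd_data_dir:
    #   embedded etcd       -> /var/lib/origin/openshift.local.etcd
    #   runtime == 'runc'   -> /var/lib/etcd/etcd.etcd/
    #   any other runtime   -> /var/lib/etcd/
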
diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd/library/delegated_serial_command.py
index 0cab1ca88..0cab1ca88 100755
--- a/roles/etcd_common/library/delegated_serial_command.py
+++ b/roles/etcd/library/delegated_serial_command.py
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 9a955c822..879ca4f4e 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -18,5 +18,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: etcd_server_certificates
-- role: etcd_common
+- role: lib_utils
diff --git a/roles/etcd_migrate/tasks/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml
index 95a0e7c0a..95a0e7c0a 100644
--- a/roles/etcd_migrate/tasks/clean_data.yml
+++ b/roles/etcd/tasks/auxiliary/clean_data.yml
diff --git a/roles/etcd_common/tasks/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index 6cb456677..11bd2310e 100644
--- a/roles/etcd_common/tasks/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -3,7 +3,7 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
-- name: Configure etcd profile.d alises
+- name: Configure etcd profile.d aliases
template:
dest: "/etc/profile.d/etcdctl.sh"
src: etcdctl.sh.j2
diff --git a/roles/etcd/tasks/backup.yml b/roles/etcd/tasks/backup.yml
new file mode 100644
index 000000000..c0538e596
--- /dev/null
+++ b/roles/etcd/tasks/backup.yml
@@ -0,0 +1,2 @@
+---
+- include: backup/backup.yml
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd/tasks/backup/backup.yml
index 2bc486d3f..42d27c081 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -29,7 +29,6 @@
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
- when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -37,9 +36,9 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup,
{{ l_avail_disk.stdout }} Kb available.
- when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
+ when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
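
The space check above now applies to every deployment, not just embedded etcd, and requires headroom for a full copy of the data dir: for example, if du reports 1048576 KB (1 GiB) in use, the task aborts unless at least 1048576 * 2 = 2097152 KB are available.
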
diff --git a/roles/etcd/tasks/backup_ca_certificates.yml b/roles/etcd/tasks/backup_ca_certificates.yml
new file mode 100644
index 000000000..a41b032f3
--- /dev/null
+++ b/roles/etcd/tasks/backup_ca_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_ca_certificates.yml
diff --git a/roles/etcd/tasks/backup_generated_certificates.yml b/roles/etcd/tasks/backup_generated_certificates.yml
new file mode 100644
index 000000000..8cf2a10cc
--- /dev/null
+++ b/roles/etcd/tasks/backup_generated_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_generated_certificates.yml
diff --git a/roles/etcd/tasks/backup_server_certificates.yml b/roles/etcd/tasks/backup_server_certificates.yml
new file mode 100644
index 000000000..267ffeb4d
--- /dev/null
+++ b/roles/etcd/tasks/backup_server_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_server_certificates.yml
diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml
new file mode 100644
index 000000000..cca1e9ad7
--- /dev/null
+++ b/roles/etcd/tasks/ca.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/deploy_ca.yml
diff --git a/roles/etcd/tasks/certificates/backup_ca_certificates.yml b/roles/etcd/tasks/certificates/backup_ca_certificates.yml
new file mode 100644
index 000000000..f60eb82ef
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_ca_certificates.yml
@@ -0,0 +1,12 @@
+---
+- name: Determine if CA certificate directory exists
+ stat:
+ path: "{{ etcd_ca_dir }}"
+ register: etcd_ca_certs_dir_stat
+- name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_ca_dir }}
+ args:
+ warn: no
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
diff --git a/roles/etcd/tasks/certificates/backup_generated_certificates.yml b/roles/etcd/tasks/certificates/backup_generated_certificates.yml
new file mode 100644
index 000000000..6a24cfcb3
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_generated_certificates.yml
@@ -0,0 +1,13 @@
+---
+- name: Determine if generated etcd certificates exist
+ stat:
+ path: "{{ etcd_conf_dir }}/generated_certs"
+ register: etcd_generated_certs_dir_stat
+
+- name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_conf_dir }}/generated_certs
+ args:
+ warn: no
+ when: etcd_generated_certs_dir_stat.stat.exists | bool
diff --git a/roles/etcd/tasks/certificates/backup_server_certificates.yml b/roles/etcd/tasks/certificates/backup_server_certificates.yml
new file mode 100644
index 000000000..8e6cc6965
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_server_certificates.yml
@@ -0,0 +1,11 @@
+---
+- name: Backup etcd certificates
+ command: >
+ tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_conf_dir }}/ca.crt
+ {{ etcd_conf_dir }}/server.crt
+ {{ etcd_conf_dir }}/server.key
+ {{ etcd_conf_dir }}/peer.crt
+ {{ etcd_conf_dir }}/peer.key
+ args:
+ warn: no
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd/tasks/certificates/deploy_ca.yml
index b4dea4a07..3d32290a2 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd/tasks/certificates/deploy_ca.yml
@@ -1,6 +1,8 @@
---
- name: Install openssl
- package: name=openssl state=present
+ package:
+ name: openssl
+ state: present
when: not etcd_is_atomic | bool
delegate_to: "{{ etcd_ca_host }}"
run_once: true
diff --git a/roles/etcd/tasks/certificates/distribute_ca.yml b/roles/etcd/tasks/certificates/distribute_ca.yml
new file mode 100644
index 000000000..632ac15dd
--- /dev/null
+++ b/roles/etcd/tasks/certificates/distribute_ca.yml
@@ -0,0 +1,47 @@
+---
+- name: Create a tarball of the etcd ca certs
+ command: >
+ tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
+ -C {{ etcd_ca_dir }} .
+ args:
+ creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ warn: no
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+
+- name: Retrieve etcd ca cert tarball
+ fetch:
+ src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_sync_cert_dir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+
+- name: Ensure ca directory exists
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: directory
+
+- name: Unarchive etcd ca cert tarballs
+ unarchive:
+ src: "{{ etcd_sync_cert_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_ca_dir }}"
+
+- name: Read current etcd CA
+ slurp:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ register: g_current_etcd_ca_output
+
+- name: Read new etcd CA
+ slurp:
+ src: "{{ etcd_ca_dir }}/ca.crt"
+ register: g_new_etcd_ca_output
+
+- copy:
+ content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
+ dest: "{{ item }}/ca.crt"
+ with_items:
+ - "{{ etcd_conf_dir }}"
+ - "{{ etcd_ca_dir }}"
diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
index bbd29ece1..119071a72 100644
--- a/roles/etcd_client_certificates/tasks/main.yml
+++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
@@ -9,7 +9,7 @@
- fail:
msg: >
CA certificate {{ etcd_ca_cert }} doesn't exist on CA host
- {{ etcd_ca_host }}. Apply 'etcd_ca' role to
+ {{ etcd_ca_host }}. Apply 'etcd_ca' action from `etcd` role to
{{ etcd_ca_host }}.
when: not g_ca_cert_stat_result.stat.exists | bool
run_once: true
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index 4795188a6..26492fb3c 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -1,6 +1,8 @@
---
- name: Install etcd
- package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
+ package:
+ name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}"
+ state: present
when: not etcd_is_containerized | bool
- name: Check status of etcd certificates
diff --git a/roles/etcd/tasks/certificates/remove_ca_certificates.yml b/roles/etcd/tasks/certificates/remove_ca_certificates.yml
new file mode 100644
index 000000000..4a86eb60d
--- /dev/null
+++ b/roles/etcd/tasks/certificates/remove_ca_certificates.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove CA certificate directory
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: absent
diff --git a/roles/etcd/tasks/certificates/remove_generated_certificates.yml b/roles/etcd/tasks/certificates/remove_generated_certificates.yml
new file mode 100644
index 000000000..993b18de2
--- /dev/null
+++ b/roles/etcd/tasks/certificates/remove_generated_certificates.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove generated etcd certificates
+ file:
+ path: "{{ etcd_conf_dir }}/generated_certs"
+ state: absent
diff --git a/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml
new file mode 100644
index 000000000..70b5c6523
--- /dev/null
+++ b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml
@@ -0,0 +1,8 @@
+---
+- name: Retrieve etcd CA certificate
+ fetch:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ dest: "{{ etcd_sync_cert_dir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
diff --git a/roles/etcd/tasks/clean_data.yml b/roles/etcd/tasks/clean_data.yml
new file mode 100644
index 000000000..d131ffd21
--- /dev/null
+++ b/roles/etcd/tasks/clean_data.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/clean_data.yml
diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml
new file mode 100644
index 000000000..2f4108a0d
--- /dev/null
+++ b/roles/etcd/tasks/client_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/fetch_client_certificates_from_ca.yml
diff --git a/roles/etcd/tasks/distribute_ca b/roles/etcd/tasks/distribute_ca
new file mode 100644
index 000000000..040c5f7af
--- /dev/null
+++ b/roles/etcd/tasks/distribute_ca
@@ -0,0 +1,2 @@
+---
+- include: certificates/distribute_ca.yml
diff --git a/roles/etcd/tasks/drop_etcdctl.yml b/roles/etcd/tasks/drop_etcdctl.yml
new file mode 100644
index 000000000..4c1f609f7
--- /dev/null
+++ b/roles/etcd/tasks/drop_etcdctl.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/drop_etcdctl.yml
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 78e543ef1..3e69af314 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -14,10 +14,7 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
-- include_role:
- name: etcd_common
- vars:
- r_etcd_common_action: drop_etcdctl
+- include: drop_etcdctl.yml
when:
- openshift_etcd_etcdctl_profile | default(true) | bool
diff --git a/roles/etcd/tasks/migrate.add_ttls.yml b/roles/etcd/tasks/migrate.add_ttls.yml
new file mode 100644
index 000000000..bc27e4ea1
--- /dev/null
+++ b/roles/etcd/tasks/migrate.add_ttls.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/add_ttls.yml
diff --git a/roles/etcd/tasks/migrate.configure_master.yml b/roles/etcd/tasks/migrate.configure_master.yml
new file mode 100644
index 000000000..3ada6e362
--- /dev/null
+++ b/roles/etcd/tasks/migrate.configure_master.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/configure_master.yml
diff --git a/roles/etcd/tasks/migrate.pre_check.yml b/roles/etcd/tasks/migrate.pre_check.yml
new file mode 100644
index 000000000..124d21561
--- /dev/null
+++ b/roles/etcd/tasks/migrate.pre_check.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/check.yml
diff --git a/roles/etcd/tasks/migrate.yml b/roles/etcd/tasks/migrate.yml
new file mode 100644
index 000000000..5d5385873
--- /dev/null
+++ b/roles/etcd/tasks/migrate.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/migrate.yml
diff --git a/roles/etcd_migrate/tasks/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index c10465af9..14625e49e 100644
--- a/roles/etcd_migrate/tasks/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -8,6 +8,7 @@
accessTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.accessTokenMaxAgeSeconds | default(86400) }}"
authroizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authroizeTokenMaxAgeSeconds | default(500) }}"
controllerLeaseTTL: "{{ (g_master_config_output.content|b64decode|from_yaml).controllerLeaseTTL | default(30) }}"
+
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
oadm migrate etcd-ttl \
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd/tasks/migration/check.yml
index 0804d9e1c..0804d9e1c 100644
--- a/roles/etcd_migrate/tasks/check.yml
+++ b/roles/etcd/tasks/migration/check.yml
diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd/tasks/migration/check_cluster_health.yml
index 201d83f99..201d83f99 100644
--- a/roles/etcd_migrate/tasks/check_cluster_health.yml
+++ b/roles/etcd/tasks/migration/check_cluster_health.yml
diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd/tasks/migration/check_cluster_status.yml
index b69fb5a52..b69fb5a52 100644
--- a/roles/etcd_migrate/tasks/check_cluster_status.yml
+++ b/roles/etcd/tasks/migration/check_cluster_status.yml
diff --git a/roles/etcd_migrate/tasks/configure.yml b/roles/etcd/tasks/migration/configure_master.yml
index a305d5bf3..a305d5bf3 100644
--- a/roles/etcd_migrate/tasks/configure.yml
+++ b/roles/etcd/tasks/migration/configure_master.yml
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd/tasks/migration/migrate.yml
index 54a9c74ff..54a9c74ff 100644
--- a/roles/etcd_migrate/tasks/migrate.yml
+++ b/roles/etcd/tasks/migration/migrate.yml
diff --git a/roles/etcd/tasks/remove_ca_certificates.yml b/roles/etcd/tasks/remove_ca_certificates.yml
new file mode 100644
index 000000000..36df1a1cc
--- /dev/null
+++ b/roles/etcd/tasks/remove_ca_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/remove_ca_certificates.yml
diff --git a/roles/etcd/tasks/remove_generated_certificates.yml b/roles/etcd/tasks/remove_generated_certificates.yml
new file mode 100644
index 000000000..b10a4b32d
--- /dev/null
+++ b/roles/etcd/tasks/remove_generated_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/remove_generated_certificates.yml
diff --git a/roles/etcd/tasks/retrieve_ca_certificates.yml b/roles/etcd/tasks/retrieve_ca_certificates.yml
new file mode 100644
index 000000000..bd6c4ec85
--- /dev/null
+++ b/roles/etcd/tasks/retrieve_ca_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/retrieve_ca_certificates.yml
diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml
new file mode 100644
index 000000000..ae26079f9
--- /dev/null
+++ b/roles/etcd/tasks/server_certificates.yml
@@ -0,0 +1,6 @@
+---
+- include: ca.yml
+ when:
+ - etcd_ca_setup | default(True) | bool
+
+- include: certificates/fetch_server_certificates_from_ca.yml
diff --git a/roles/etcd_upgrade/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml
index 136ec1142..24071f9ad 100644
--- a/roles/etcd_upgrade/tasks/upgrade_image.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_image.yml
@@ -20,6 +20,11 @@
regexp: "{{ current_image.stdout }}$"
replace: "{{ new_etcd_image }}"
+- lineinfile:
+ destfile: "{{ etcd_conf_file }}"
+ regexp: '^ETCD_QUOTA_BACKEND_BYTES='
+ line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}"
+
- name: Restart etcd_container
systemd:
name: "{{ etcd_service }}"
@@ -29,8 +34,15 @@
## TODO: probably should just move this into the backup playbooks, also this
## will fail on atomic host. We need to revisit how to do etcd backups there as
## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
- name: Upgrade etcd for etcdctl when not atomic
- package: name=etcd state=latest
+ package:
+ name: etcd
+ state: latest
when: not l_ostree_booted.stat.exists | bool
- name: Verify cluster is healthy
diff --git a/roles/etcd_upgrade/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
index 324b69605..505e28afb 100644
--- a/roles/etcd_upgrade/tasks/upgrade_rpm.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
@@ -19,6 +19,11 @@
name: "{{ l_etcd_target_package }}"
state: latest
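+
+# Persist the configured etcd backend quota in the etcd config file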
+- lineinfile:
+ destfile: "{{ etcd_conf_file }}"
+ regexp: '^ETCD_QUOTA_BACKEND_BYTES='
+ line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}"
+
- name: Restart etcd
service:
name: "{{ etcd_service }}"
diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml
new file mode 100644
index 000000000..9e69027eb
--- /dev/null
+++ b/roles/etcd/tasks/upgrade_image.yml
@@ -0,0 +1,2 @@
+---
+- include: upgrade/upgrade_image.yml
diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml
new file mode 100644
index 000000000..29603d2b6
--- /dev/null
+++ b/roles/etcd/tasks/upgrade_rpm.yml
@@ -0,0 +1,2 @@
+---
+- include: upgrade/upgrade_rpm.yml
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 2c2803aee..8462bb4c8 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -45,6 +45,7 @@ ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#ETCD_ENABLE_V2="true"
+ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}
#[proxy]
#ETCD_PROXY=off
diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd/templates/etcdctl.sh.j2
index ac7d9c72f..ac7d9c72f 100644
--- a/roles/etcd_common/templates/etcdctl.sh.j2
+++ b/roles/etcd/templates/etcdctl.sh.j2
diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd/templates/openssl_append.j2
index f28316fc2..f28316fc2 100644
--- a/roles/etcd_ca/templates/openssl_append.j2
+++ b/roles/etcd/templates/openssl_append.j2
diff --git a/roles/etcd_ca/README.md b/roles/etcd_ca/README.md
deleted file mode 100644
index 60a880e30..000000000
--- a/roles/etcd_ca/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-etcd_ca
-========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_client_certificates/README.md b/roles/etcd_client_certificates/README.md
deleted file mode 100644
index 269d5296d..000000000
--- a/roles/etcd_client_certificates/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Etcd Certificates
-===========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_client_certificates/meta/main.yml b/roles/etcd_client_certificates/meta/main.yml
deleted file mode 100644
index efebdb599..000000000
--- a/roles/etcd_client_certificates/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: Etcd Client Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_common
diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md
deleted file mode 100644
index d1c3a6602..000000000
--- a/roles/etcd_common/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-etcd_common
-========================
-
-Common resources for dependent etcd roles. E.g. default variables for:
-* config directories
-* certificates
-* ports
-* other settings
-
-Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g.
-
-```yaml
-- delegated_serial_command:
- command: /usr/bin/make_database.sh arg1 arg2
- creates: /path/to/database
-```
-
-Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example).
-
-Dependencies
-------------
-
-openshift-repos
-
-Example Playbook
-----------------
-
-**Drop etcdctl aliases**
-
-```yaml
-- include_role:
- name: etcd_common
- tasks_from: etcdctl
-```
-
-**Get access to common variables**
-
-```yaml
-# meta.yml of etcd
-...
-dependencies:
-- { role: etcd_common }
-```
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
deleted file mode 100644
index 89993f7ea..000000000
--- a/roles/etcd_common/defaults/main.yml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-# Default action when calling this role
-r_etcd_common_action: noop
-r_etcd_common_backup_tag: ''
-r_etcd_common_backup_sufix_name: ''
-
-# runc, docker, host
-r_etcd_common_etcd_runtime: "docker"
-r_etcd_common_embedded_etcd: false
-
-# etcd run on a host => use etcdctl command directly
-# etcd run as a docker container => use docker exec
-# etcd run as a runc container => use runc exec
-r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
-
-# etcd server vars
-etcd_conf_dir: '/etc/etcd'
-r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
-etcd_system_container_conf_dir: /var/lib/etcd/etc
-etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
-etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
-etcd_key_file: "{{ etcd_conf_dir }}/server.key"
-etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
-etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
-
-# etcd ca vars
-etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
-etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
-etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
-etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
-etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
-etcd_ca_name: etcd_ca
-etcd_req_ext: etcd_v3_req
-etcd_ca_exts_peer: etcd_v3_ca_peer
-etcd_ca_exts_server: etcd_v3_ca_server
-etcd_ca_exts_self: etcd_v3_ca_self
-etcd_ca_exts_client: etcd_v3_ca_client
-etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
-etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
-etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
-etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
-etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
-etcd_ca_default_days: 1825
-
-r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt
-r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key
-r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt
-
-# etcd server & certificate vars
-etcd_hostname: "{{ inventory_hostname }}"
-etcd_ip: "{{ ansible_default_ipv4.address }}"
-etcd_is_atomic: False
-etcd_is_containerized: False
-etcd_is_thirdparty: False
-
-# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
-
-# etcd ports and protocols
-etcd_client_port: 2379
-etcd_peer_port: 2380
-etcd_url_scheme: http
-etcd_peer_url_scheme: http
-
-etcd_initial_cluster_state: new
-etcd_initial_cluster_token: etcd-cluster-1
-
-etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
-etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
-etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-
-etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
diff --git a/roles/etcd_common/meta/main.yml b/roles/etcd_common/meta/main.yml
deleted file mode 100644
index dfb1c7a2c..000000000
--- a/roles/etcd_common/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description:
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies: []
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
deleted file mode 100644
index 6ed87e6c7..000000000
--- a/roles/etcd_common/tasks/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Fail if invalid r_etcd_common_action provided
- fail:
- msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'"
- when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl']
-
-- name: Include main action task file
- include: "{{ r_etcd_common_action }}.yml"
- when: r_etcd_common_action != "noop"
diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml
deleted file mode 100644
index a88d78235..000000000
--- a/roles/etcd_common/tasks/noop.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# This is file is here because the usage of tags, specifically `pre_upgrade`
-# breaks the functionality of this role.
-# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025
diff --git a/roles/etcd_common/vars/main.yml b/roles/etcd_common/vars/main.yml
deleted file mode 100644
index 00d697776..000000000
--- a/roles/etcd_common/vars/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
-# Location of the service file is fixed and not meant to be changed
-etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
diff --git a/roles/etcd_migrate/README.md b/roles/etcd_migrate/README.md
deleted file mode 100644
index 369e78ff2..000000000
--- a/roles/etcd_migrate/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-Role Name
-=========
-
-Offline etcd migration of data from v2 to v3
-
-Requirements
-------------
-
-It is expected all consumers of the etcd data are not accessing the data.
-Otherwise the migrated data can be out-of-sync with the v2 and can result in unhealthy etcd cluster.
-
-The role itself is responsible for:
-- checking etcd cluster health and raft status before the migration
-- checking of presence of any v3 data (in that case the migration is stopped)
-- migration of v2 data to v3 data (including attaching leases of keys prefixed with "/kubernetes.io/events" and "/kubernetes.io/masterleases" string)
-- validation of migrated data (all v2 keys and in v3 keys and are set to the identical value)
-
-The migration itself requires an etcd member to be down in the process. Once the migration is done, the etcd member is started.
-
-Role Variables
---------------
-
-TBD
-
-Dependencies
-------------
-
-- etcd_common
-- lib_utils
-
-Example Playbook
-----------------
-
-```yaml
-- name: Migrate etcd data from v2 to v3
- hosts: oo_etcd_to_config
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_etcd_migrate
- vars:
- etcd_peer: "{{ ansible_default_ipv4.address }}"
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jan Chaloupka (jchaloup@redhat.com)
diff --git a/roles/etcd_migrate/defaults/main.yml b/roles/etcd_migrate/defaults/main.yml
deleted file mode 100644
index 05cf41fbb..000000000
--- a/roles/etcd_migrate/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# Default action when calling this role, choices: check, migrate, configure
-r_etcd_migrate_action: migrate
diff --git a/roles/etcd_migrate/meta/main.yml b/roles/etcd_migrate/meta/main.yml
deleted file mode 100644
index f3cabbef6..000000000
--- a/roles/etcd_migrate/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jan Chaloupka
- description: Etcd migration
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- { role: etcd_common }
-- { role: lib_utils }
diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml
deleted file mode 100644
index e82f6a6b4..000000000
--- a/roles/etcd_migrate/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Fail if invalid r_etcd_migrate_action provided
- fail:
- msg: "etcd_migrate role can only be called with 'check', 'migrate', 'configure', 'add_ttls', or 'clean_data'"
- when: r_etcd_migrate_action not in ['check', 'migrate', 'configure', 'add_ttls', 'clean_data']
-
-- name: Include main action task file
- include: "{{ r_etcd_migrate_action }}.yml"
-
-# 2. migrate v2 datadir into v3:
-# ETCDCTL_API=3 ./etcdctl migrate --data-dir=${data_dir} --no-ttl
-# backup the etcd datadir first
-# Provide a way for an operator to specify transformer
-
-# 3. re-configure OpenShift master at /etc/origin/master/master-config.yml
-# set storage-backend to “etcd3”
-# 4. we could leave the master restart to current logic (there is already the code ready (single vs. HA master))
-
-# Run
-# etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://172.16.186.45:2379 cluster-health
-# to check the cluster health (from the etcdctl.sh aliases file)
-
-# Another assumption:
-# - in order to migrate all etcd v2 data into v3, we need to shut down the cluster (let's verify that on Wednesday meeting)
-# -
diff --git a/roles/etcd_server_certificates/README.md b/roles/etcd_server_certificates/README.md
deleted file mode 100644
index 269d5296d..000000000
--- a/roles/etcd_server_certificates/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Etcd Certificates
-===========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml
deleted file mode 100644
index 4b6013a49..000000000
--- a/roles/etcd_server_certificates/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: Etcd Server Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_ca
- when: (etcd_ca_setup | default(True) | bool)
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
deleted file mode 100644
index 61bbba225..000000000
--- a/roles/etcd_upgrade/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-r_etcd_upgrade_action: upgrade
-r_etcd_upgrade_mechanism: rpm
diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
deleted file mode 100644
index afdb0267f..000000000
--- a/roles/etcd_upgrade/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jan Chaloupka
- description:
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_common
- r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
deleted file mode 100644
index 129c69d6b..000000000
--- a/roles/etcd_upgrade/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# INPUT r_etcd_upgrade_action
-- name: Fail if invalid etcd_upgrade_action provided
- fail:
- msg: "etcd_upgrade role can only be called with 'upgrade'"
- when:
- - r_etcd_upgrade_action not in ['upgrade']
-
-- name: Detecting Atomic Host Operating System
- stat:
- path: /run/ostree-booted
- register: l_ostree_booted
-
-- include: "{{ r_etcd_upgrade_action }}.yml"
diff --git a/roles/etcd_upgrade/tasks/upgrade.yml b/roles/etcd_upgrade/tasks/upgrade.yml
deleted file mode 100644
index 420c9638e..000000000
--- a/roles/etcd_upgrade/tasks/upgrade.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# INPUT r_etcd_upgrade_version
-# INPUT r_etcd_upgrade_mechanism
-- name: Failt if r_etcd_upgrade_mechanism is not set during upgrade
- fail:
- msg: "r_etcd_upgrade_mechanism can be only set to 'rpm' or 'image'"
- when:
- - r_etcd_upgrade_mechanism not in ['rpm', 'image']
-
-- name: "Upgrade {{ r_etcd_upgrade_mechanism }} based etcd"
- include: upgrade_{{ r_etcd_upgrade_mechanism }}.yml
diff --git a/roles/etcd_upgrade/vars/main.yml b/roles/etcd_upgrade/vars/main.yml
deleted file mode 100644
index 5ed919d42..000000000
--- a/roles/etcd_upgrade/vars/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# EXPECTS etcd_peer
-etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index 0c7347603..b9e15e6e0 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -27,8 +27,6 @@ Role Variables
Dependencies
------------
-openshift_facts
-
Example Playbook
----------------
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 35f825586..51128dba6 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -12,7 +12,4 @@ galaxy_info:
categories:
- cloud
- system
-dependencies:
-- role: openshift_facts
-- role: openshift_etcd_client_certificates
- etcd_cert_prefix: flannel.etcd-
+dependencies: []
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 71c8f38c3..1d0f5df6a 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,6 +1,6 @@
---
flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}"
-flannel_subnet_len: "{{ 32 - openshift.master.sdn_host_subnet_length }}"
+flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md
new file mode 100644
index 000000000..321acca21
--- /dev/null
+++ b/roles/installer_checkpoint/README.md
@@ -0,0 +1,177 @@
+OpenShift-Ansible Installer Checkpoint
+======================================
+
+A complete OpenShift cluster installation comprises many different components
+and can take anywhere from 30 minutes to several hours to complete. If the
+installation fails, it can be difficult to determine at which component the
+failure occurred. Additionally, an operator may want to re-run only the failed
+component instead of starting over from the beginning; any components that come
+after the failed one would then also need to be run individually.
+
+Design
+------
+
+The Installer Checkpoint implements an Ansible callback plugin that displays
+and logs the installer status at the end of a playbook run.
+
+To ensure the callback plugin is loaded regardless of the ansible.cfg
+configuration, the plugin has been placed inside the installer_checkpoint role,
+which must be called early in playbook execution. The `std_include.yml` playbook
+is run first for all entry point playbooks; therefore, the initialization of the
+checkpoint plugin has been placed at the beginning of that file.
+
+Playbooks use the [set_stats][set_stats] Ansible module to set a custom stats
+variable indicating the status of the phase being executed.
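+
+For example, a minimal `set_stats` task for a single phase might look like the
+following sketch (the phase name and status strings follow the conventions
+described below):
+
+```yaml
+- name: Set etcd install 'In Progress'
+  set_stats:
+    data:
+      installer_phase_etcd: "In Progress"
+    aggregate: false
+```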
+
+The installer_checkpoint.py callback plugin extends the Ansible
+`v2_playbook_on_stats` method, which is called at the end of a playbook run, to
+display the status of each phase which was run. The INSTALLER STATUS report is
+displayed immediately following the PLAY RECAP.
+
+Phases of cluster installation are mapped to the steps in the
+[common/openshift-cluster/config.yml][openshift_cluster_config] playbook.
+
+To display the installer phases in the correct order, the `installer_phases`
+variable defines the phase/component ordering.
+
+```python
+ # Set the order of the installer phases
+ installer_phases = [
+ 'installer_phase_initialize',
+ 'installer_phase_etcd',
+ 'installer_phase_nfs',
+ 'installer_phase_loadbalancer',
+ 'installer_phase_master',
+ 'installer_phase_master_additional',
+ 'installer_phase_node',
+ 'installer_phase_glusterfs',
+ 'installer_phase_hosted',
+ 'installer_phase_metrics',
+ 'installer_phase_logging',
+ 'installer_phase_servicecatalog',
+ ]
+```
+
+Additional attributes, such as display title and component playbook, of each
+phase are stored in the `phase_attributes` variable.
+
+```python
+ # Define the attributes of the installer phases
+ phase_attributes = {
+ 'installer_phase_initialize': {
+ 'title': 'Initialization',
+ 'playbook': ''
+ },
+ 'installer_phase_etcd': {
+ 'title': 'etcd Install',
+ 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ },
+ 'installer_phase_nfs': {
+ 'title': 'NFS Install',
+ 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ },
+ #...
+ }
+```
+
+Usage
+-----
+
+To indicate the beginning of a component installation, a play must be added at
+the start of the component's main playbook to set the phase status to "In
+Progress". Additionally, a play must be added after the last play for that
+component to set the phase status to "Complete".
+
+The following example shows the first play of the 'installer phase' loading the
+`installer_checkpoint` role, as well as the `set_stats` task for setting
+`installer_phase_initialize` to "In Progress". Various plays are run for the
+phase/component, followed by a final play that sets `installer_phase_initialize`
+to "Complete".
+
+```yaml
+# common/openshift-cluster/std_include.yml
+---
+- name: Initialization Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ roles:
+ - installer_checkpoint
+ tasks:
+ - name: Set install initialization 'In Progress'
+ set_stats:
+ data:
+ installer_phase_initialize: "In Progress"
+ aggregate: false
+
+#...
+# Various plays here
+#...
+
+- name: Initialization Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set install initialization 'Complete'
+ set_stats:
+ data:
+ installer_phase_initialize: "Complete"
+ aggregate: false
+```
+
+Each phase or component of the installer will follow a similar pattern, with the
+exception that the `installer_checkpoint` role does not need to be called since
+it was already loaded by the play in `std_include.yml`. It is important to
+place the 'In Progress' and 'Complete' plays as the first and last plays of the
+phase or component.
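+
+As a sketch, the start play of a later phase (here the etcd phase, whose entry
+point playbook is listed in `phase_attributes` above) follows the same shape as
+the initialization example, minus the role entry:
+
+```yaml
+# playbooks/byo/openshift-etcd/config.yml (illustrative)
+---
+- name: etcd Install Checkpoint Start
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+  - name: Set etcd install 'In Progress'
+    set_stats:
+      data:
+        installer_phase_etcd: "In Progress"
+      aggregate: false
+```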
+
+Examples
+--------
+
+Example display of a successful playbook run:
+
+```
+PLAY RECAP *********************************************************************
+master01.example.com : ok=158 changed=16 unreachable=0 failed=0
+node01.example.com : ok=469 changed=74 unreachable=0 failed=0
+node02.example.com : ok=157 changed=17 unreachable=0 failed=0
+localhost : ok=24 changed=0 unreachable=0 failed=0
+
+
+INSTALLER STATUS ***************************************************************
+Initialization : Complete
+etcd Install : Complete
+NFS Install : Not Started
+Load balancer Install : Not Started
+Master Install : Complete
+Master Additional Install : Complete
+Node Install : Complete
+GlusterFS Install : Not Started
+Hosted Install : Complete
+Metrics Install : Not Started
+Logging Install : Not Started
+Service Catalog Install : Not Started
+```
+
+Example display if a failure occurs during execution:
+
+```
+INSTALLER STATUS ***************************************************************
+Initialization : Complete
+etcd Install : Complete
+NFS Install : Not Started
+Load balancer Install : Not Started
+Master Install : In Progress
+ This phase can be restarted by running: playbooks/byo/openshift-master/config.yml
+Master Additional Install : Not Started
+Node Install : Not Started
+GlusterFS Install : Not Started
+Hosted Install : Not Started
+Metrics Install : Not Started
+Logging Install : Not Started
+Service Catalog Install : Not Started
+```
+
+[set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html
+[openshift_cluster_config]: https://github.com/openshift/openshift-ansible/blob/master/playbooks/common/openshift-cluster/config.yml
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
new file mode 100644
index 000000000..033240e62
--- /dev/null
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -0,0 +1,182 @@
+"""Ansible callback plugin to print a summary completion status of installation
+phases.
+"""
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+DOCUMENTATION = '''
+
+'''
+
+EXAMPLES = '''
+---------------------------------------------
+Example display of a successful playbook run:
+
+PLAY RECAP *********************************************************************
+master01.example.com : ok=158 changed=16 unreachable=0 failed=0
+node01.example.com : ok=469 changed=74 unreachable=0 failed=0
+node02.example.com : ok=157 changed=17 unreachable=0 failed=0
+localhost : ok=24 changed=0 unreachable=0 failed=0
+
+
+INSTALLER STATUS ***************************************************************
+Initialization : Complete
+etcd Install : Complete
+NFS Install : Not Started
+Load balancer Install : Not Started
+Master Install : Complete
+Master Additional Install : Complete
+Node Install : Complete
+GlusterFS Install : Not Started
+Hosted Install : Complete
+Metrics Install : Not Started
+Logging Install : Not Started
+Service Catalog Install : Not Started
+
+-----------------------------------------------------
+Example display if a failure occurs during execution:
+
+INSTALLER STATUS ***************************************************************
+Initialization : Complete
+etcd Install : Complete
+NFS Install : Not Started
+Load balancer Install : Not Started
+Master Install : In Progress
+ This phase can be restarted by running: playbooks/byo/openshift-master/config.yml
+Master Additional Install : Not Started
+Node Install : Not Started
+GlusterFS Install : Not Started
+Hosted Install : Not Started
+Metrics Install : Not Started
+Logging Install : Not Started
+Service Catalog Install : Not Started
+
+'''
+
+
+class CallbackModule(CallbackBase):
+ """This callback summarizes installation phase status."""
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'installer_checkpoint'
+ CALLBACK_NEEDS_WHITELIST = False
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ def v2_playbook_on_stats(self, stats):
+
+ # Set the order of the installer phases
+ installer_phases = [
+ 'installer_phase_initialize',
+ 'installer_phase_etcd',
+ 'installer_phase_nfs',
+ 'installer_phase_loadbalancer',
+ 'installer_phase_master',
+ 'installer_phase_master_additional',
+ 'installer_phase_node',
+ 'installer_phase_glusterfs',
+ 'installer_phase_hosted',
+ 'installer_phase_metrics',
+ 'installer_phase_logging',
+ 'installer_phase_servicecatalog',
+ ]
+
+ # Define the attributes of the installer phases
+ phase_attributes = {
+ 'installer_phase_initialize': {
+ 'title': 'Initialization',
+ 'playbook': ''
+ },
+ 'installer_phase_etcd': {
+ 'title': 'etcd Install',
+ 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ },
+ 'installer_phase_nfs': {
+ 'title': 'NFS Install',
+ 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ },
+ 'installer_phase_loadbalancer': {
+ 'title': 'Load balancer Install',
+ 'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml'
+ },
+ 'installer_phase_master': {
+ 'title': 'Master Install',
+ 'playbook': 'playbooks/byo/openshift-master/config.yml'
+ },
+ 'installer_phase_master_additional': {
+ 'title': 'Master Additional Install',
+ 'playbook': 'playbooks/byo/openshift-master/additional_config.yml'
+ },
+ 'installer_phase_node': {
+ 'title': 'Node Install',
+ 'playbook': 'playbooks/byo/openshift-node/config.yml'
+ },
+ 'installer_phase_glusterfs': {
+ 'title': 'GlusterFS Install',
+ 'playbook': 'playbooks/byo/openshift-glusterfs/config.yml'
+ },
+ 'installer_phase_hosted': {
+ 'title': 'Hosted Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml'
+ },
+ 'installer_phase_metrics': {
+ 'title': 'Metrics Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml'
+ },
+ 'installer_phase_logging': {
+ 'title': 'Logging Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml'
+ },
+ 'installer_phase_servicecatalog': {
+ 'title': 'Service Catalog Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml'
+ },
+ }
+
+ # Find the longest phase title
+ max_column = 0
+ for phase in phase_attributes:
+ max_column = max(max_column, len(phase_attributes[phase]['title']))
+
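+        # Custom stats set via set_stats are aggregated under the '_run' key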
+ if '_run' in stats.custom:
+ self._display.banner('INSTALLER STATUS')
+ for phase in installer_phases:
+ phase_title = phase_attributes[phase]['title']
+ padding = max_column - len(phase_title) + 2
+ if phase in stats.custom['_run']:
+ phase_status = stats.custom['_run'][phase]
+ self._display.display(
+ '{}{}: {}'.format(phase_title, ' ' * padding, phase_status),
+ color=self.phase_color(phase_status))
+ if phase_status == 'In Progress' and phase != 'installer_phase_initialize':
+ self._display.display(
+ '\tThis phase can be restarted by running: {}'.format(
+ phase_attributes[phase]['playbook']))
+ else:
+ # Phase was not found in custom stats
+ self._display.display(
+ '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'),
+ color=C.COLOR_SKIP)
+
+ self._display.display("", screen_only=True)
+
+ def phase_color(self, status):
+ """ Return color code for installer phase"""
+ valid_status = [
+ 'In Progress',
+ 'Complete',
+ ]
+
+ if status not in valid_status:
+ self._display.warning('Invalid phase status defined: {}'.format(status))
+
+ if status == 'Complete':
+ phase_color = C.COLOR_OK
+ elif status == 'In Progress':
+ phase_color = C.COLOR_ERROR
+ else:
+ phase_color = C.COLOR_WARN
+
+ return phase_color
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 45d7444a4..1e6eb2386 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -745,7 +745,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index 231857cca..8c6a81cc8 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -723,7 +723,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 44f3f57d8..4a7847e88 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 687cff579..b8af5cad9 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index ddf5d90b7..3364f8de3 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index c00eee381..c64d7ffd2 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -835,7 +835,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 0c925ab0b..492494bda 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -860,7 +860,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 567ecfd4e..b412ca8af 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -709,7 +709,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 9515de569..8bbc22c49 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -715,7 +715,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index d461e5ae9..ad17051cb 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -759,7 +759,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 22ad58725..74a84ac89 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -726,7 +726,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index b6c6e47d9..eea1516ae 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -699,7 +699,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index f7fc286e0..dc33d3b8a 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -718,7 +718,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 2206878a4..88fd9554d 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -735,7 +735,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 126d7a617..8408f9ebc 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -738,7 +738,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index d20904d0d..d1be0b534 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -670,7 +670,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 91199d093..9a281e6cd 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -727,7 +727,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index f9b2d81fa..b503c330b 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -724,7 +724,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 895322ba5..7a9e3bf89 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 8f8e46e1e..875e473ad 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -769,7 +769,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 7130cc5fc..ec3635753 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -713,7 +713,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 0c4b99e30..c010607e8 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -765,7 +765,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 7ab139e85..e83a6e26d 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -772,7 +772,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 5d539ced4..0d46bbf96 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 97e213f46..662d77ec1 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 9339a85e5..574f109e4 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -729,7 +729,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 2fa349547..e430546ee 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -771,7 +771,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 55e1054e7..a12620968 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -683,7 +683,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 63bad57b4..134b2ad19 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -760,7 +760,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py
index f350bd25d..f350bd25d 100644
--- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
+++ b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py
diff --git a/roles/lib_openshift/src/test/integration/oc_configmap.yml b/roles/lib_openshift/src/test/integration/oc_configmap.yml
index c0d200e73..6a452ccec 100755
--- a/roles/lib_openshift/src/test/integration/oc_configmap.yml
+++ b/roles/lib_openshift/src/test/integration/oc_configmap.yml
@@ -55,7 +55,7 @@
config: "{{ filename }}"
from_literal:
foo: notbar
- deployment_type: online
+ deployment_type: openshift-enterprise
- name: fetch the updated configmap
oc_configmap:
@@ -70,7 +70,7 @@
assert:
that:
- cmout.results.results[0].metadata.name == 'configmaptest'
- - cmout.results.results[0].data.deployment_type == 'online'
+ - cmout.results.results[0].data.deployment_type == 'openshift-enterprise'
- cmout.results.results[0].data.foo == 'notbar'
###### end update test ###########
diff --git a/roles/lib_openshift/src/test/unit/test_oc_configmap.py b/roles/lib_openshift/src/test/unit/test_oc_configmap.py
index 318fd6167..27042c64b 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_configmap.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_configmap.py
@@ -79,7 +79,7 @@ class OCConfigMapTest(unittest.TestCase):
''' Testing a configmap create '''
params = copy.deepcopy(OCConfigMapTest.params)
params['from_file'] = {'test': '/root/file'}
- params['from_literal'] = {'foo': 'bar', 'deployment_type': 'online'}
+ params['from_literal'] = {'foo': 'bar', 'deployment_type': 'openshift-enterprise'}
configmap = '''{
"apiVersion": "v1",
@@ -100,7 +100,7 @@ class OCConfigMapTest(unittest.TestCase):
"apiVersion": "v1",
"data": {
"foo": "bar",
- "deployment_type": "online",
+ "deployment_type": "openshift-enterprise",
"test": "this is a file\\n"
},
"kind": "ConfigMap",
@@ -128,7 +128,7 @@ class OCConfigMapTest(unittest.TestCase):
self.assertTrue(results['changed'])
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap')
- self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'online')
+ self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'openshift-enterprise')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index 95a305b58..e5ac1f74f 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -618,17 +619,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
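+    # Retry failed repoquery runs, sleeping 'retry_interval' seconds between
+    # attempts, until the attempt count exceeds 'retries'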
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index baf72fe47..cf5c2e423 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -792,7 +793,7 @@ class Yedit(object):
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index 40773b1c1..5f5b93639 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -19,17 +19,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
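+    # Retry failed repoquery runs, sleeping 'retry_interval' seconds between
+    # attempts, until the attempt count exceeds 'retries'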
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index 957c35a06..0a4fbe07a 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -590,7 +590,7 @@ class Yedit(object):
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index 567f8c9e0..07a04b7ae 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -10,6 +10,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index 3da340c85..e2f7af5ad 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -13,8 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: nuage_ca
-- role: nuage_common
-- role: openshift_etcd_client_certificates
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
index 612d689c2..7be5d6743 100755
--- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -62,16 +62,14 @@ spec:
selector:
matchLabels:
k8s-app: nuage-master-config
+ updateStrategy:
+ type: RollingUpdate
template:
metadata:
labels:
k8s-app: nuage-master-config
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
nodeSelector:
install-monitor: "true"
containers:
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
index 02e9a1563..6a1267d94 100755
--- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -23,7 +23,7 @@ data:
# IP address and port number of master API server
masterApiServer: {{ api_server_url }}
# REST server URL
- nuageMonRestServer: {{ nuage_mon_rest_server_url }}
+ nuageMonRestServer: https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }}
# Bridge name for the docker bridge
dockerBridgeName: docker0
# Certificate for connecting to the openshift monitor REST api
@@ -32,11 +32,6 @@ data:
nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key
# CA certificate for verifying the master's rest server
nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt
- # Nuage vport mtu size
- interfaceMTU: {{ nuage_vport_mtu }}
- # Logging level for the plugin
- # allowed options are: "dbg", "info", "warn", "err", "emer", "off"
- logLevel: 3
# This will generate the required Nuage CNI yaml configuration
cni_yaml_config: |
@@ -72,10 +67,6 @@ spec:
k8s-app: nuage-cni-ds
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
containers:
# This container installs Nuage CNI binaries
# and CNI network config file on each node.
@@ -157,10 +148,6 @@ spec:
k8s-app: nuage-vrs-ds
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
containers:
# This container installs Nuage VRS running as a
# container on each worker node
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index d8bfca62a..fdf01b7c2 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -24,4 +24,4 @@ cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
-nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
+nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 4e7f54f79..ca39c1aec 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -13,10 +13,10 @@ openshift_aws_wait_for_ssh: True
openshift_aws_clusterid: default
openshift_aws_region: us-east-1
openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
-openshift_aws_iam_cert_chain_path: ''
openshift_aws_iam_cert_key_path: ''
openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index 06e1f9602..06e1f9602 100644
--- a/roles/openshift_aws/filter_plugins/filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml
index 8d4e5ac43..48555e5da 100644
--- a/roles/openshift_aws/tasks/build_ami.yml
+++ b/roles/openshift_aws/tasks/build_ami.yml
@@ -31,7 +31,7 @@
assign_public_ip: yes
region: "{{ openshift_aws_region }}"
key_name: "{{ openshift_aws_ssh_key_name }}"
- group: "{{ openshift_aws_clusterid }}"
+ group: "{{ openshift_aws_build_ami_group }}"
instance_type: m4.xlarge
vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
image: "{{ openshift_aws_base_ami }}"
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index a1fdd66fc..7bc3184df 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -29,9 +29,9 @@
if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
else openshift_aws_elb_listeners }}"
-- name: "Create ELB {{ openshift_aws_elb_name }}"
+- name: "Create ELB {{ l_openshift_aws_elb_name }}"
ec2_elb_lb:
- name: "{{ openshift_aws_elb_name }}"
+ name: "{{ l_openshift_aws_elb_name }}"
state: present
security_group_names: "{{ openshift_aws_elb_security_groups }}"
idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
@@ -49,10 +49,10 @@
# It is necessary to ignore_errors here because the instances are not in 'ready'
# state when first added to ELB
-- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}"
ec2_elb:
instance_id: "{{ item.id }}"
- ec2_elbs: "{{ openshift_aws_elb_name }}"
+ ec2_elbs: "{{ l_openshift_aws_elb_name }}"
state: present
region: "{{ openshift_aws_region }}"
wait: False
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
index cd9772a25..f74a62b8b 100644
--- a/roles/openshift_aws/tasks/iam_cert.yml
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -11,17 +11,23 @@
- "'failed' in elb_cert_chain"
- elb_cert_chain.failed
- "'msg' in elb_cert_chain"
- - "'already exists and has a different certificate body' in elb_cert_chain.msg"
- - "'BotoServerError' in elb_cert_chain.msg"
+ - "'already exists and has a different certificate body' in elb_cert_chain.msg or 'BotoServerError' in elb_cert_chain.msg or 'Traceback' in elb_cert_chain.msg.module_stderr"
when:
- openshift_aws_create_iam_cert | bool
- openshift_aws_iam_cert_path != ''
- openshift_aws_iam_cert_key_path != ''
- openshift_aws_elb_cert_arn == ''
+- debug: msg="{{ elb_cert_chain }}"
+
- name: set_fact openshift_aws_elb_cert_arn
set_fact:
openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+ when:
+ - openshift_aws_create_iam_cert | bool
+ - openshift_aws_iam_cert_path != ''
+ - openshift_aws_iam_cert_key_path != ''
+ - openshift_aws_elb_cert_arn == ''
- name: wait for cert to propagate
pause:
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 189caeaee..a2920b744 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -34,14 +34,14 @@
include: elb.yml
vars:
openshift_aws_elb_direction: internal
- openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal"
+ l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal"
openshift_aws_elb_scheme: internal
- name: create our master external load balancers
include: elb.yml
vars:
openshift_aws_elb_direction: external
- openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external"
+ l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external"
openshift_aws_elb_scheme: internet-facing
- name: wait for ssh to become available
diff --git a/roles/openshift_ca/defaults/main.yml b/roles/openshift_ca/defaults/main.yml
index ecfcc88b3..742b15df4 100644
--- a/roles/openshift_ca/defaults/main.yml
+++ b/roles/openshift_ca/defaults/main.yml
@@ -1,3 +1,11 @@
---
openshift_ca_cert_expire_days: 1825
openshift_master_cert_expire_days: 730
+
+openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
+openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
+openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
+openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig"
+
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index dfbdf0cc7..f8b784a63 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -14,4 +14,3 @@ galaxy_info:
- system
dependencies:
- role: openshift_cli
-- role: openshift_named_certificates
diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml
index d04c1766d..4d80bf921 100644
--- a/roles/openshift_ca/vars/main.yml
+++ b/roles/openshift_ca/vars/main.yml
@@ -1,9 +1,2 @@
---
-openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
-openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
-openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
-openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
-openshift_version: "{{ openshift_pkg_version | default('') }}"
-
-openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig"
loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md
index 198163127..57e732f37 100644
--- a/roles/openshift_default_storage_class/README.md
+++ b/roles/openshift_default_storage_class/README.md
@@ -1,7 +1,7 @@
openshift_master_storage_class
=========
-A role that deploys configuratons for Openshift StorageClass
+A role that deploys configurations for Openshift StorageClass
Requirements
------------
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 334150f63..5a3e50678 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -6,9 +6,6 @@
with_items:
- role: docker
local_facts:
- additional_registries: "{{ openshift_docker_additional_registries | default(None) }}"
- blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}"
- insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}"
selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}"
log_driver: "{{ openshift_docker_log_driver | default(None) }}"
log_options: "{{ openshift_docker_log_options | default(None) }}"
@@ -23,12 +20,6 @@
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- set_fact:
- docker_additional_registries: "{{ openshift.docker.additional_registries
- | default(omit) }}"
- docker_blocked_registries: "{{ openshift.docker.blocked_registries
- | default(omit) }}"
- docker_insecure_registries: "{{ openshift.docker.insecure_registries
- | default(omit) }}"
docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}"
docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}"
docker_log_options: "{{ openshift.docker.log_options | default(omit) }}"
diff --git a/roles/openshift_etcd_ca/meta/main.yml b/roles/openshift_etcd_ca/meta/main.yml
deleted file mode 100644
index f1d669d6b..000000000
--- a/roles/openshift_etcd_ca/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-galaxy_info:
- author: Tim Bielawa
- description: Meta role around the etcd_ca role
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: openshift_etcd_facts
-- role: etcd_ca
- when: (etcd_ca_setup | default(True) | bool)
diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml
index 3268c390f..fbc72c8a3 100644
--- a/roles/openshift_etcd_client_certificates/meta/main.yml
+++ b/roles/openshift_etcd_client_certificates/meta/main.yml
@@ -11,6 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_etcd_facts
-- role: etcd_client_certificates
+dependencies: []
diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml
new file mode 100644
index 000000000..7f8b667f0
--- /dev/null
+++ b/roles/openshift_etcd_client_certificates/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- include_role:
+ name: etcd
+ tasks_from: client_certificates
diff --git a/roles/openshift_etcd_server_certificates/meta/main.yml b/roles/openshift_etcd_server_certificates/meta/main.yml
deleted file mode 100644
index 7750f14af..000000000
--- a/roles/openshift_etcd_server_certificates/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Etcd Server Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_etcd_facts
-- role: etcd_server_certificates
diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md
index 8cc479c73..014cef111 100644
--- a/roles/openshift_examples/README.md
+++ b/roles/openshift_examples/README.md
@@ -21,13 +21,13 @@ Facts
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------------|-----------------------------------------------------|------------------------------------------|
-| openshift_examples_load_centos | true when openshift_deployment_typenot 'enterprise' | Load centos image streams |
-| openshift_examples_load_rhel | true if openshift_deployment_type is 'enterprise' | Load rhel image streams |
-| openshift_examples_load_db_templates| true | Loads database templates |
-| openshift_examples_load_quickstarts | true | Loads quickstarts ie: nodejs, rails, etc |
-| openshift_examples_load_xpaas | false | Loads xpass streams and templates |
+| Name | Default value | |
+|-------------------------------------|----------------------------------------------------------------|------------------------------------------|
+| openshift_examples_load_centos | true when openshift_deployment_type not 'openshift-enterprise' | Load centos image streams |
+| openshift_examples_load_rhel | true if openshift_deployment_type is 'openshift-enterprise' | Load rhel image streams |
+| openshift_examples_load_db_templates| true | Loads database templates |
+| openshift_examples_load_quickstarts | true                                                            | Loads quickstarts, e.g. nodejs, rails     |
+| openshift_examples_load_xpaas       | false                                                           | Loads xpaas streams and templates         |
Dependencies
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ebfa6bb8f..215ff4b72 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -55,9 +55,6 @@ def migrate_docker_facts(facts):
""" Apply migrations for docker facts """
params = {
'common': (
- 'additional_registries',
- 'insecure_registries',
- 'blocked_registries',
'options'
),
'node': (
@@ -477,11 +474,7 @@ def set_selectors(facts):
facts if they were not already present
"""
- deployment_type = facts['common']['deployment_type']
- if deployment_type == 'online':
- selector = "type=infra"
- else:
- selector = "region=infra"
+ selector = "region=infra"
if 'hosted' not in facts:
facts['hosted'] = {}
@@ -497,10 +490,10 @@ def set_selectors(facts):
facts['hosted']['metrics'] = {}
if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
facts['hosted']['metrics']['selector'] = None
- if 'logging' not in facts['hosted']:
- facts['hosted']['logging'] = {}
- if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
- facts['hosted']['logging']['selector'] = None
+ if 'logging' not in facts:
+ facts['logging'] = {}
+ if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
+ facts['logging']['selector'] = None
if 'etcd' not in facts['hosted']:
facts['hosted']['etcd'] = {}
if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
@@ -568,7 +561,7 @@ def set_identity_providers_if_unset(facts):
name='allow_all', challenge=True, login=True,
kind='AllowAllPasswordIdentityProvider'
)
- if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
identity_provider = dict(
name='deny_all', challenge=True, login=True,
kind='DenyAllPasswordIdentityProvider'
@@ -770,47 +763,28 @@ def set_deployment_facts_if_unset(facts):
service_type = 'atomic-openshift'
if deployment_type == 'origin':
service_type = 'origin'
- elif deployment_type in ['enterprise']:
- service_type = 'openshift'
facts['common']['service_type'] = service_type
- if 'docker' in facts:
- deployment_type = facts['common']['deployment_type']
- if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
- addtl_regs = facts['docker'].get('additional_registries', [])
- ent_reg = 'registry.access.redhat.com'
- if ent_reg not in addtl_regs:
- facts['docker']['additional_registries'] = addtl_regs + [ent_reg]
-
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
if 'registry_url' not in facts[role]:
registry_url = 'openshift/origin-${component}:${version}'
- if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
registry_url = 'openshift3/ose-${component}:${version}'
- elif deployment_type == 'atomic-enterprise':
- registry_url = 'aep3_beta/aep-${component}:${version}'
facts[role]['registry_url'] = registry_url
if 'master' in facts:
deployment_type = facts['common']['deployment_type']
openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
- if 'disabled_features' in facts['master']:
- if deployment_type == 'atomic-enterprise':
- curr_disabled_features = set(facts['master']['disabled_features'])
- facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
- else:
+ if 'disabled_features' not in facts['master']:
if facts['common']['deployment_subtype'] == 'registry':
facts['master']['disabled_features'] = openshift_features
if 'node' in facts:
deployment_type = facts['common']['deployment_type']
if 'storage_plugin_deps' not in facts['node']:
- if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
- facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
- else:
- facts['node']['storage_plugin_deps'] = []
+ facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
return facts
@@ -1602,11 +1576,13 @@ def set_builddefaults_facts(facts):
builddefaults['git_no_proxy'] = builddefaults['no_proxy']
# If we're actually defining a builddefaults config then create admission_plugin_config
# then merge builddefaults[config] structure into admission_plugin_config
+
+ # 'config' is the 'openshift_builddefaults_json' inventory variable
if 'config' in builddefaults:
if 'admission_plugin_config' not in facts['master']:
- facts['master']['admission_plugin_config'] = dict()
+            # Scaffold out the full expected data structure
+ facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
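
The scaffold above exists so the unconditional delete_empty_keys() call cannot raise KeyError when a user-supplied config omits the BuildDefaults key. A minimal sketch of that interaction, with made-up config values:

    # The scaffold survives dict.update() whenever the user config lacks a
    # BuildDefaults key of its own (user_config here is hypothetical).
    scaffold = {'BuildDefaults': {'configuration': {'env': {}}}}
    user_config = {'SomeOtherPlugin': {'configuration': {}}}
    scaffold.update(user_config)
    env = scaffold['BuildDefaults']['configuration']['env']  # always resolvable
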
@@ -1669,7 +1645,7 @@ def set_container_facts_if_unset(facts):
facts
"""
deployment_type = facts['common']['deployment_type']
- if deployment_type in ['enterprise', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
master_image = 'openshift3/ose'
cli_image = master_image
node_image = 'openshift3/node'
@@ -1679,16 +1655,6 @@ def set_container_facts_if_unset(facts):
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
deployer_image = 'openshift3/ose-deployer'
- elif deployment_type == 'atomic-enterprise':
- master_image = 'aep3_beta/aep'
- cli_image = master_image
- node_image = 'aep3_beta/node'
- ovs_image = 'aep3_beta/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
- pod_image = 'aep3_beta/aep-pod'
- router_image = 'aep3_beta/aep-haproxy-router'
- registry_image = 'aep3_beta/aep-docker-registry'
- deployer_image = 'aep3_beta/aep-deployer'
else:
master_image = 'openshift/origin'
cli_image = master_image
@@ -1703,7 +1669,9 @@ def set_container_facts_if_unset(facts):
facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
# If openshift_docker_use_system_container is set and is True ....
if 'use_system_container' in list(facts['docker'].keys()):
- if facts['docker']['use_system_container']:
+ # use safe_get_bool as the inventory variable may not be a
+            # valid boolean on its own.
+ if safe_get_bool(facts['docker']['use_system_container']):
# ... set the service name to container-engine
facts['docker']['service_name'] = 'container-engine'
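
The switch to safe_get_bool matters because inventory values generally arrive as strings, and every non-empty string is truthy in Python. A stand-in sketch of the kind of coercion involved (coerce_bool is illustrative, not the module's actual helper):

    def coerce_bool(value):
        # The string 'False' is truthy, so a plain `if value:` test would
        # wrongly enable the system container; compare normalized strings.
        return str(value).lower() in ('yes', 'true', '1')

    assert bool('False') is True        # the trap a naive truthiness check hits
    assert coerce_bool('False') is False
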
@@ -1808,7 +1776,10 @@ class OpenShiftFacts(object):
'etcd',
'hosted',
'master',
- 'node']
+ 'node',
+ 'logging',
+ 'loggingops',
+ 'metrics']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
@@ -1925,7 +1896,7 @@ class OpenShiftFacts(object):
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
self.system_facts['ansible_fqdn']]
- hostname = choose_hostname(hostname_values, ip_addr)
+ hostname = choose_hostname(hostname_values, ip_addr).lower()
defaults['common'] = dict(ip=ip_addr,
public_ip=ip_addr,
@@ -1989,66 +1960,6 @@ class OpenShiftFacts(object):
if 'hosted' in roles or self.role == 'hosted':
defaults['hosted'] = dict(
- metrics=dict(
- deploy=False,
- duration=7,
- resolution='10s',
- storage=dict(
- kind=None,
- volume=dict(
- name='metrics',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- loggingops=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es-ops',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- logging=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
etcd=dict(
storage=dict(
kind=None,
@@ -2095,6 +2006,69 @@ class OpenShiftFacts(object):
router=dict()
)
+ defaults['logging'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='logging-es',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['loggingops'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='logging-es-ops',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['metrics'] = dict(
+ deploy=False,
+ duration=7,
+ resolution='10s',
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='metrics',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
return defaults
def guess_host_provider(self):
@@ -2265,19 +2239,6 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
if 'docker' in new_local_facts:
- # remove duplicate and empty strings from registry lists, preserving order
- for cat in ['additional', 'blocked', 'insecure']:
- key = '{0}_registries'.format(cat)
- if key in new_local_facts['docker']:
- val = new_local_facts['docker'][key]
- if isinstance(val, string_types):
- val = [x.strip() for x in val.split(',')]
- seen = set()
- new_local_facts['docker'][key] = list()
- for registry in val:
- if registry not in seen and registry != '':
- seen.add(registry)
- new_local_facts['docker'][key].append(registry)
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
isinstance(new_local_facts['docker']['log_options'], string_types):
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yaml
new file mode 100644
index 000000000..ad205ba33
--- /dev/null
+++ b/roles/openshift_gcp/tasks/main.yaml
@@ -0,0 +1,43 @@
+#
+# This role relies on gcloud invoked via templated bash in order to
+# provide a high-performance deployment option. The next logical step
+# is to transition to a Deployment Manager template which is then instantiated.
+# TODO: use a formal set of role parameters consistent with openshift_aws
+#
+---
+- name: Templatize DNS script
+ template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx
+- name: Templatize provision script
+ template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx
+- name: Templatize de-provision script
+ template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx
+ when:
+ - state | default('present') == 'absent'
+
+- name: Provision GCP DNS domain
+ command: /tmp/openshift_gcp_provision_dns.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ register: dns_provision
+ when:
+ - state | default('present') == 'present'
+
+- name: Ensure that DNS resolves to the hosted zone
+ assert:
+ that:
+ - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout"
+ msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'."
+ when:
+ - state | default('present') == 'present'
+
+- name: Provision GCP resources
+ command: /tmp/openshift_gcp_provision.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ when:
+ - state | default('present') == 'present'
+
+- name: De-provision GCP resources
+ command: /tmp/openshift_gcp_provision_remove.sh
+ when:
+ - state | default('present') == 'absent'
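
The assert above checks that the zone's live NS delegation matches the comma-delimited nameserver list printed by the DNS script. A rough Python analogue of the same comparison, assuming the dnspython package is available (illustrative, not part of the role):

    import dns.resolver  # assumption: dnspython >= 2.0

    def delegation_matches(zone, expected_csv):
        # expected_csv mirrors dns_provision.stdout: a comma-delimited list
        actual = sorted(str(rr) for rr in dns.resolver.resolve(zone, 'NS'))
        return ','.join(actual) == expected_csv
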
diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh
new file mode 100644
index 000000000..eacf84b4d
--- /dev/null
+++ b/roles/openshift_gcp/templates/dns.j2.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -euo pipefail
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
+fi
+
+# Always output the expected nameservers as a comma delimited list
+gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
new file mode 100644
index 000000000..e68e9683f
--- /dev/null
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -0,0 +1,318 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Create SSH key for GCE
+if [ ! -f "{{ gce_ssh_private_key }}" ]; then
+ ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N ''
+ ssh-add "{{ gce_ssh_private_key }}" || true
+fi
+
+# Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
+pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub")
+key_tmp_file='/tmp/ocp-gce-keys'
+if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then
+ if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then
+ gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
+ fi
+ echo -n 'cloud-user:' >> "$key_tmp_file"
+ cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file"
+ gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
+ rm -f "$key_tmp_file"
+fi
+
+metadata=""
+if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then
+ echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)"
+ exit 1
+ fi
+ metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}"
+fi
+if [[ -n "{{ provision_gce_user_data_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then
+ echo "User data file missing at {{ provision_gce_user_data_file }}"
+ exit 1
+ fi
+ if [[ -n "${metadata}" ]]; then
+ metadata+=","
+ else
+ metadata="--metadata-from-file="
+ fi
+ metadata+="user-data={{ provision_gce_user_data_file }}"
+fi
+
+# Select image or image family
+image="{{ provision_gce_registered_image }}"
+if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then
+ if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then
+    echo "No compute image or image-family found; create an image named '{{ provision_gce_registered_image }}' to continue"
+ exit 1
+ fi
+ image="family/${image}"
+fi
+
+### PROVISION THE INFRASTRUCTURE ###
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# The DNS managed zone must already exist in Google Cloud DNS (the DNS script creates it); fail fast if it is missing
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
+ exit 1
+fi
+
+# Create network
+if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto"
+else
+ echo "Network '{{ gce_network_name }}' already exists"
+fi
+
+# Firewall rules in a form:
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+range=""
+if [[ -n "{{ openshift_node_port_range }}" ]]; then
+ range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
+fi
+declare -A FW_RULES=(
+ ['icmp']='--allow icmp'
+ ['ssh-external']='--allow tcp:22'
+ ['ssh-internal']='--allow tcp:22 --source-tags bastion'
+ ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
+ ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
+ ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
+ ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
+ ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]}
+ else
+ echo "Firewall rule '{{ provision_prefix }}${rule}' already exists"
+ fi ) &
+done
+
+
+# Master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global
+else
+ echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists"
+fi ) &
+
+# Internal master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists"
+fi ) &
+
+# Router IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists"
+fi ) &
+
+
+{% for node_group in provision_gce_node_groups %}
+# configure {{ node_group.name }}
+(
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \
+ --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \
+ --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
+ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
+ --image "${image}" ${metadata}
+ else
+ echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists"
+ fi
+
+ # Create instance group
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \
+ --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
+ else
+ echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists"
+ fi
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+
+# Configure the master external LB rules
+(
+# Master health check
+if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists"
+fi
+
+gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \
+ --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}"
+
+# Master backend service
+if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}"
+ gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}"
+else
+ echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists"
+fi
+
+# Master tcp proxy target
+if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend"
+else
+ echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists"
+fi
+
+# Master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the master internal LB rules
+(
+# Internal master health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists"
+fi
+
+# Internal master target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists"
+fi
+
+# Internal master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the infra node rules
+(
+# Router health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists"
+fi
+
+# Router target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists"
+fi
+
+# Router forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists"
+fi
+) &
+
+for i in `jobs -p`; do wait $i; done
+
+# set the target pools
+(
+if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+else
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}"
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+fi
+) &
+
+# configure DNS
+(
+# Retry DNS changes until they succeed since this may be a shared resource
+while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f $dns
+
+ # DNS record for master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
+ fi
+
+ # DNS record for internal master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
+ fi
+
+ # DNS record for router lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
+ else
+ echo "DNS record for '{{ wildcard_zone }}' already exists"
+ fi
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if [[ -f $dns ]]; then
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+      if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+        continue
+      fi
+      # $? after an `if ! cmd` test is 0 on failure, so exit non-zero explicitly
+      exit 1
+ fi
+ fi
+ break
+done
+) &
+
+# Create bucket for registry
+(
+if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+else
+ echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists"
+fi
+) &
+
+# wait until all node groups are stable
+{% for node_group in provision_gce_node_groups %}
+# wait for stable {{ node_group.name }}
+( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) &
+{% endfor %}
+
+
+for i in `jobs -p`; do wait $i; done
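
Nearly every resource above follows the same describe-then-create pattern, which is what makes the script safe to re-run: existing resources are reported and skipped rather than failing under set -e. A compact Python restatement of the pattern (illustrative only):

    import subprocess

    def ensure(describe_cmd, create_cmd):
        # Create the resource only when `describe` reports it does not exist.
        exists = subprocess.call(describe_cmd, stdout=subprocess.DEVNULL,
                                 stderr=subprocess.DEVNULL) == 0
        if not exists:
            subprocess.check_call(create_cmd)
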
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
new file mode 100644
index 000000000..41ceab2b5
--- /dev/null
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+set -euo pipefail
+
+function teardown_cmd() {
+ a=( $@ )
+ local name=$1
+ a=( "${a[@]:1}" )
+ local flag=0
+ local found=
+ for i in ${a[@]}; do
+ if [[ "$i" == "--"* ]]; then
+ found=true
+ break
+ fi
+ flag=$((flag+1))
+ done
+ if [[ -z "${found}" ]]; then
+ flag=$((flag+1))
+ fi
+ if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
+ fi
+}
+
+function teardown() {
+ for i in `seq 1 20`; do
+ if teardown_cmd $@; then
+ break
+ fi
+ sleep 0.5
+ done
+}
+
+# Preemptively spin down the instances
+{% for node_group in provision_gce_node_groups %}
+# scale down {{ node_group.name }}
+(
+ # performs a delete and scale down as one operation to ensure maximum parallelism
+ if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then
+ exit 0
+ fi
+ instances="${instances%?}"
+ if [[ -z "${instances}" ]]; then
+ echo "warning: No instances in {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then
+ echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+) &
+{% endfor %}
+
+# Bucket for registry
+(
+if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+fi
+) &
+
+# DNS
+(
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ # Retry DNS changes until they succeed since this may be a shared resource
+ while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f "${dns}"
+
+ # export all dns records that match into a zone format, and turn each line into a set of args for
+ # record-sets transaction.
+ gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}"
+ if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
+ awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
+ then
+ rm -f "${dns}"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+      if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+        continue
+      fi
+      exit 1
+ fi
+ fi
+ rm "${dns}.input"
+ break
+ done
+fi
+) &
+
+(
+# Router network rules
+teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+
+# Internal master network rules
+teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+) &
+
+(
+# Master SSL network rules
+teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
+teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
+teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
+teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
+teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
+) &
+
+# Firewall rules
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+declare -A FW_RULES=(
+ ['icmp']=""
+ ['ssh-external']=""
+ ['ssh-internal']=""
+ ['master-internal']=""
+ ['master-external']=""
+ ['node-internal']=""
+ ['infra-node-internal']=""
+ ['infra-node-external']=""
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ # retry a few times because this call can be flaky
+ for i in `seq 1 3`; do
+ if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
+ break
+ fi
+ done
+ fi ) &
+done
+
+for i in `jobs -p`; do wait $i; done
+
+{% for node_group in provision_gce_node_groups %}
+# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
+(
+ teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
+ teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+# Network
+teardown "{{ gce_network_name }}" compute networks
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp_image_prep/files/partition.conf
new file mode 100644
index 000000000..b87e5e0b6
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/files/partition.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStartPost=-/usr/bin/growpart /dev/sda 1
+ExecStartPost=-/sbin/xfs_growfs /
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp_image_prep/tasks/main.yaml
new file mode 100644
index 000000000..fee5ab618
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/tasks/main.yaml
@@ -0,0 +1,18 @@
+---
+# GCE images ship /etc/fstab entries that mount XFS with barrier=1, an option valid only for ext filesystems.
+- name: Remove barrier=1 from XFS fstab entries
+ lineinfile:
+ path: /etc/fstab
+ regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
+    line: '\1xfs\2 \3'
+ backrefs: yes
+
+- name: Ensure the root filesystem has XFS group quota turned on
+ lineinfile:
+ path: /boot/grub2/grub.cfg
+ regexp: '^(.*)linux16 (.*)$'
+ line: '\1linux16 \2 rootflags=gquota'
+ backrefs: yes
+
+- name: Ensure the root partition grows on startup
+ copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
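
As a sanity check, the barrier-removal substitution above can be exercised directly with Python's re module (the fstab entry is made up):

    import re

    line = "UUID=abcd / xfs rw,barrier=1 0 0"   # hypothetical /etc/fstab entry
    fixed = re.sub(r'^(.+)xfs(.+?),?barrier=1,?(.*?)$', r'\1xfs\2 \3', line)
    print(fixed)  # "UUID=abcd / xfs rw  0 0" -- the barrier option is gone
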
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 8d35db6b5..326176273 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -3,7 +3,10 @@ Ansible action plugin to execute health checks in OpenShift clusters.
"""
import sys
import os
+import base64
import traceback
+import errno
+import json
+from ansible.module_utils.six import string_types
from collections import defaultdict
from ansible.plugins.action import ActionBase
@@ -38,8 +41,13 @@ class ActionModule(ActionBase):
# storing the information we need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
+ # if the user wants to write check results to files, they provide this directory:
+ output_dir = task_vars.get("openshift_checks_output_dir")
+ if output_dir:
+ output_dir = os.path.join(output_dir, task_vars["ansible_host"])
+
try:
- known_checks = self.load_known_checks(tmp, task_vars)
+ known_checks = self.load_known_checks(tmp, task_vars, output_dir)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
@@ -65,21 +73,20 @@ class ActionModule(ActionBase):
for name in resolved_checks:
display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"]))
- check = known_checks[name]
- check_results[name] = run_check(name, check, user_disabled_checks)
- if check.changed:
- check_results[name]["changed"] = True
+ check_results[name] = run_check(name, known_checks[name], user_disabled_checks, output_dir)
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"
+ write_result_to_output_dir(output_dir, result)
return result
- def load_known_checks(self, tmp, task_vars):
+ def load_known_checks(self, tmp, task_vars, output_dir=None):
"""Find all existing checks and return a mapping of names to instances."""
load_checks()
+ want_full_results = bool(output_dir)
known_checks = {}
for cls in OpenShiftCheck.subclasses():
@@ -90,7 +97,12 @@ class ActionModule(ActionBase):
"duplicate check name '{}' in: '{}' and '{}'"
"".format(name, full_class_name(cls), full_class_name(other_cls))
)
- known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
+ known_checks[name] = cls(
+ execute_module=self._execute_module,
+ tmp=tmp,
+ task_vars=task_vars,
+ want_full_results=want_full_results
+ )
return known_checks
@@ -185,9 +197,11 @@ def normalize(checks):
return [name.strip() for name in checks if name.strip()]
-def run_check(name, check, user_disabled_checks):
+def run_check(name, check, user_disabled_checks, output_dir=None):
"""Run a single check if enabled and return a result dict."""
- if name in user_disabled_checks:
+
+ # determine if we're going to run the check (not inactive or disabled)
+ if name in user_disabled_checks or '*' in user_disabled_checks:
return dict(skipped=True, skipped_reason="Disabled by user request")
# pylint: disable=broad-except; capturing exceptions broadly is intentional,
@@ -201,12 +215,134 @@ def run_check(name, check, user_disabled_checks):
if not is_active:
return dict(skipped=True, skipped_reason="Not active for this host")
+ # run the check
+ result = {}
try:
- return check.run()
+ result = check.run()
except OpenShiftCheckException as exc:
- return dict(failed=True, msg=str(exc))
+ check.register_failure(exc)
+ except Exception as exc:
+ check.register_failure("\n".join([str(exc), traceback.format_exc()]))
+
+ # process the check state; compose the result hash, write files as needed
+ if check.changed:
+ result["changed"] = True
+ if check.failures or result.get("failed"):
+ if "msg" in result: # failure result has msg; combine with any registered failures
+ check.register_failure(result.get("msg"))
+ result["failures"] = [(fail.name, str(fail)) for fail in check.failures]
+ result["failed"] = True
+ result["msg"] = "\n".join(str(fail) for fail in check.failures)
+ write_to_output_file(output_dir, name + ".failures.json", result["failures"])
+ if check.logs:
+ write_to_output_file(output_dir, name + ".log.json", check.logs)
+ if check.files_to_save:
+ write_files_to_save(output_dir, check)
+
+ return result
+
+
+def prepare_output_dir(dirname):
+ """Create the directory, including parents. Return bool for success/failure."""
+ try:
+ os.makedirs(dirname)
+ return True
+ except OSError as exc:
+ # trying to create existing dir leads to error;
+        # creating an already-existing dir raises an error, which is fine;
+        # for any other error, assume the dir is unusable
+
+
+def copy_remote_file_to_dir(check, file_to_save, output_dir, fname):
+ """Copy file from remote host to local file in output_dir, if given."""
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ local_file = os.path.join(output_dir, fname)
+
+ # pylint: disable=broad-except; do not need to do anything about failure to write dir/file
+ # and do not want exceptions to break anything.
+ try:
+ # NOTE: it would have been nice to copy the file directly without loading it into
+ # memory, but there does not seem to be a good way to do this via ansible.
+ result = check.execute_module("slurp", dict(src=file_to_save), register=False)
+ if result.get("failed"):
+ display.warning("Could not retrieve file {}: {}".format(file_to_save, result.get("msg")))
+ return
+
+ content = result["content"]
+ if result.get("encoding") == "base64":
+ content = base64.b64decode(content)
+ with open(local_file, "wb") as outfile:
+ outfile.write(content)
+ except Exception as exc:
+ display.warning("Failed writing remote {} to local {}: {}".format(file_to_save, local_file, exc))
+ return
+
+
+def _no_fail(obj):
+ # pylint: disable=broad-except; do not want serialization to fail for any reason
+ try:
+ return str(obj)
+ except Exception:
+ return "[not serializable]"
+
+
+def write_to_output_file(output_dir, filename, data):
+ """If output_dir provided, write data to file. Serialize as JSON if data is not a string."""
+
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ filename = os.path.join(output_dir, filename)
+ try:
+ with open(filename, 'w') as outfile:
+ if isinstance(data, string_types):
+ outfile.write(data)
+ else:
+ json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
+ except Exception as exc:
+ display.warning("Could not write output file {}: {}".format(filename, exc))
+
+
+def write_result_to_output_dir(output_dir, result):
+ """If output_dir provided, write the result as json to result.json.
+
+ Success/failure of the write is recorded as "output_files" in the result hash afterward.
+ Otherwise this is much like write_to_output_file.
+ """
+
+ if not output_dir:
+ return
+ if not prepare_output_dir(output_dir):
+ result["output_files"] = "Error creating output directory " + output_dir
+ return
+
+ filename = os.path.join(output_dir, "result.json")
+ try:
+ with open(filename, 'w') as outfile:
+ json.dump(result, outfile, sort_keys=True, indent=4, default=_no_fail)
+ result["output_files"] = "Check results for this host written to " + filename
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
except Exception as exc:
- return dict(failed=True, msg=str(exc), exception=traceback.format_exc())
+ result["output_files"] = "Error writing check results to {}:\n{}".format(filename, exc)
+
+
+def write_files_to_save(output_dir, check):
+ """Write files to check subdir in output dir."""
+ if not output_dir:
+ return
+ output_dir = os.path.join(output_dir, check.name)
+ seen_file = defaultdict(lambda: 0)
+ for file_to_save in check.files_to_save:
+ fname = file_to_save.filename
+        while seen_file[fname]:  # never overwrite a file; number duplicate names (first duplicate becomes <name>.2)
+ seen_file[fname] += 1
+ fname = "{}.{}".format(fname, seen_file[fname])
+ seen_file[fname] += 1
+ if file_to_save.remote_filename:
+ copy_remote_file_to_dir(check, file_to_save.remote_filename, output_dir, fname)
+ else:
+ write_to_output_file(output_dir, fname, file_to_save.contents)
def full_class_name(cls):
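Putting run_check and the writer helpers together, a hypothetical per-host tree when an output directory is configured (names assumed from this patch and its tests, which show the action plugin appending the hostname to the configured directory):

    # <output_dir>/<host>/result.json           - whole-host result (write_result_to_output_dir)
    # <output_dir>/<host>/<check>.failures.json - registered failures for one check
    # <output_dir>/<host>/<check>.log.json      - the check's ordered log entries
    # <output_dir>/<host>/<check>/<file>        - files_to_save; duplicate names get .2, .3, ...
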
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 349655966..dcaf87eca 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -10,6 +10,7 @@ import traceback
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.utils.color import stringc
+from ansible.module_utils.six import string_types
FAILED_NO_MSG = u'Failed without returning a message.'
@@ -140,11 +141,19 @@ def deduplicate_failures(failures):
Returns a new list of failures such that identical failures from different
hosts are grouped together in a single entry. The relative order of failures
is preserved.
+
+    If a failure contains unhashable values, the original list of failures is returned.
"""
groups = defaultdict(list)
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
- groups[group_key].append(failure)
+ try:
+ groups[group_key].append(failure)
+ except TypeError:
+            # abort and return the original list of failures when a failure
+            # contains an unhashable value.
+ return failures
+
result = []
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
@@ -159,7 +168,10 @@ def format_failure(failure):
"""Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
by a newline separator when output to the user."""
- host = u', '.join(failure['host'])
+ if isinstance(failure['host'], string_types):
+ host = failure['host']
+ else:
+ host = u', '.join(failure['host'])
play = failure['play']
task = failure['task']
msg = failure['msg']
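A sketch of the deduplication key above, with hypothetical failure rows; per the docstring, identical failures from two hosts share one key and collapse to one entry, whose 'host' the formatter then joins (plain strings now pass through unjoined):

    failures = [
        {"host": "master1", "play": "health", "task": "checks", "msg": "boom"},
        {"host": "master2", "play": "health", "task": "checks", "msg": "boom"},
    ]
    # both rows map to the key:
    #   (("msg", "boom"), ("play", "health"), ("task", "checks"))
    # an unhashable value (e.g. msg as a dict) makes the key unhashable,
    # raising TypeError, so the original ungrouped list is returned instead
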
diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py
index 2e60735d6..c72f4c5b3 100644
--- a/roles/openshift_health_checker/library/ocutil.py
+++ b/roles/openshift_health_checker/library/ocutil.py
@@ -40,18 +40,17 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- namespace=dict(type="str", required=True),
+ namespace=dict(type="str", required=False),
config_file=dict(type="str", required=True),
cmd=dict(type="str", required=True),
extra_args=dict(type="list", default=[]),
),
)
- cmd = [
- locate_oc_binary(),
- '--config', module.params["config_file"],
- '-n', module.params["namespace"],
- ] + shlex.split(module.params["cmd"])
+ cmd = [locate_oc_binary(), '--config', module.params["config_file"]]
+ if module.params["namespace"]:
+ cmd += ['-n', module.params["namespace"]]
+ cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"]
failed = True
try:
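With the namespace now optional, the command list assembled above looks like this; a runnable sketch where "oc" stands in for whatever locate_oc_binary() returns, and the parameter values are illustrative:

    import shlex

    params = {"config_file": "/etc/origin/master/admin.kubeconfig",
              "namespace": None, "cmd": "adm diagnostics", "extra_args": ["NetworkCheck"]}
    cmd = ["oc", "--config", params["config_file"]]
    if params["namespace"]:
        cmd += ["-n", params["namespace"]]  # only present when a namespace is supplied
    cmd += shlex.split(params["cmd"]) + params["extra_args"]
    assert cmd == ["oc", "--config", "/etc/origin/master/admin.kubeconfig",
                   "adm", "diagnostics", "NetworkCheck"]
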
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 02ee1d0f9..ce05b44a4 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -2,14 +2,18 @@
Health checks for OpenShift clusters.
"""
+import json
import operator
import os
+import time
+import collections
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
from ansible.module_utils import six
from ansible.module_utils.six.moves import reduce # pylint: disable=import-error,redefined-builtin
+from ansible.module_utils.six import string_types
from ansible.plugins.filter.core import to_bool as ansible_to_bool
@@ -27,7 +31,7 @@ class OpenShiftCheckException(Exception):
class OpenShiftCheckExceptionList(OpenShiftCheckException):
- """A container for multiple logging errors that may be detected in one check."""
+ """A container for multiple errors that may be detected in one check."""
def __init__(self, errors):
self.errors = errors
super(OpenShiftCheckExceptionList, self).__init__(
@@ -40,26 +44,53 @@ class OpenShiftCheckExceptionList(OpenShiftCheckException):
return self.errors[index]
+FileToSave = collections.namedtuple("FileToSave", "filename contents remote_filename")
+
+
+# pylint: disable=too-many-instance-attributes; all represent significantly different state.
+# Arguably they could be separated into two hashes, one for storing parameters, and one for
+# storing result state; but that smells more like clutter than clarity.
@six.add_metaclass(ABCMeta)
class OpenShiftCheck(object):
- """
- A base class for defining checks for an OpenShift cluster environment.
+ """A base class for defining checks for an OpenShift cluster environment.
- Expect optional params: method execute_module, dict task_vars, and string tmp.
+ Optional init params: method execute_module, dict task_vars, and string tmp
execute_module is expected to have a signature compatible with _execute_module
from ansible plugins/action/__init__.py, e.g.:
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
which provides the check's stored task_vars and tmp.
+
+ Optional init param: want_full_results
+    If True, checks that can gather logs, tarballs, etc. should do so; otherwise they
+    need not spend the time, since the results will not be written to an output directory.
"""
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # store a method for executing ansible modules from the check
self._execute_module = execute_module
+ # the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
self.tmp = tmp
+ # a boolean for disabling the gathering of results (files, computations) that won't
+ # actually be recorded/used
+ self.want_full_results = want_full_results
+
+ # mainly for testing purposes; see execute_module_with_retries
+ self._module_retries = 3
+ self._module_retry_interval = 5 # seconds
+ # state to be recorded for inspection after the check runs:
+ #
# set to True when the check changes the host, for accurate total "changed" count
self.changed = False
+ # list of OpenShiftCheckException for check to report (alternative to returning a failed result)
+ self.failures = []
+ # list of FileToSave - files the check specifies to be written locally if so configured
+ self.files_to_save = []
+        # log messages for the check - (context, msg) pairs where msg is serializable.
+ # These are intended to be a sequential record of what the check observed and determined.
+ self.logs = []
@abstractproperty
def name(self):
@@ -80,9 +111,20 @@ class OpenShiftCheck(object):
"""Returns true if this check applies to the ansible-playbook run."""
return True
+ def is_first_master(self):
+ """Determine if running on first master. Returns: bool"""
+ masters = self.get_var("groups", "oo_first_master", default=None) or [None]
+ return masters[0] == self.get_var("ansible_host")
+
@abstractmethod
def run(self):
- """Executes a check, normally implemented as a module."""
+ """Executes a check against a host and returns a result hash similar to Ansible modules.
+
+        The direction going forward is to record state in the check's attributes rather
+        than build a result hash: return an empty hash and let the action plugin fill it
+        in, or raise an OpenShiftCheckException. Returning a hash may be deprecated if
+        it does not prove necessary.
+ """
return {}
@classmethod
@@ -94,7 +136,43 @@ class OpenShiftCheck(object):
for subclass in subclass.subclasses():
yield subclass
- def execute_module(self, module_name=None, module_args=None):
+ def register_failure(self, error):
+ """Record in the check that a failure occurred.
+
+        Recorded failures are merged into the result hash for now. They are also saved to the
+        output directory (if provided) as <check>.failures.json, and registered as a log entry
+        in <check>.log.json for context.
+ """
+ # It should be an exception; make it one if not
+ if not isinstance(error, OpenShiftCheckException):
+ error = OpenShiftCheckException(str(error))
+ self.failures.append(error)
+ # duplicate it in the logs so it can be seen in the context of any
+ # information that led to the failure
+ self.register_log("failure: " + error.name, str(error))
+
+ def register_log(self, context, msg):
+ """Record an entry for the check log.
+
+        Log entries are intended to provide context for the whole sequence of what the check observed.
+        They are saved as an ordered list in a local check log file.
+        They are not included in the result or in the ansible log; they exist just for the record.
+ """
+ self.logs.append([context, msg])
+
+ def register_file(self, filename, contents=None, remote_filename=""):
+ """Record a file that a check makes available to be saved individually to output directory.
+
+ Either file contents should be passed in, or a file to be copied from the remote host
+        should be specified. Contents that are not a string will be serialized as JSON.
+
+ NOTE: When copying a file from remote host, it is slurped into memory as base64, meaning
+ you should avoid using this on huge files (more than say 10M).
+ """
+ if contents is None and not remote_filename:
+ raise OpenShiftCheckException("File data/source not specified; this is a bug in the check.")
+ self.files_to_save.append(FileToSave(filename, contents, remote_filename))
+
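A hedged usage sketch for register_file from inside a check's run() (filenames and contents illustrative):

    # non-string contents are serialized as JSON when written
    self.register_file("nodes.json", contents={"nodes": ["a", "b"]})
    # remote files are slurped into memory via the 'slurp' module; avoid huge files
    self.register_file("docker.log", remote_filename="/var/log/docker")
    # passing neither contents nor remote_filename raises OpenShiftCheckException
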
+ def execute_module(self, module_name=None, module_args=None, save_as_name=None, register=True):
"""Invoke an Ansible module from a check.
Invoke stored _execute_module, normally copied from the action
@@ -106,6 +184,12 @@ class OpenShiftCheck(object):
Ansible version).
So e.g. check.execute_module("foo", dict(arg1=...))
+
+ save_as_name specifies a file name for saving the result to an output directory,
+ if needed, and is intended to uniquely identify the result of invoking execute_module.
+ If not provided, the module name will be used.
+        If register is False, the result is not registered in logs or files to save.
+
Return: result hash from module execution.
"""
if self._execute_module is None:
@@ -113,7 +197,33 @@ class OpenShiftCheck(object):
self.__class__.__name__ +
" invoked execute_module without providing the method at initialization."
)
- return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ result = self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ if result.get("changed"):
+ self.changed = True
+ for output in ["result", "stdout"]:
+ # output is often JSON; attempt to decode
+ try:
+ result[output + "_json"] = json.loads(result[output])
+ except (KeyError, ValueError):
+ pass
+
+ if register:
+ self.register_log("execute_module: " + module_name, result)
+ self.register_file(save_as_name or module_name + ".json", result)
+ return result
+
+ def execute_module_with_retries(self, module_name, module_args):
+ """Run execute_module and retry on failure."""
+ result = {}
+ tries = 0
+ while True:
+ res = self.execute_module(module_name, module_args)
+ if tries > self._module_retries or not res.get("failed"):
+ result.update(res)
+ return result
+ result["last_failed"] = res
+ tries += 1
+ time.sleep(self._module_retry_interval)
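With the defaults above, a persistently failing module is attempted five times with four pauses of _module_retry_interval seconds between them (roughly 20 seconds); the final failing result is returned, with the previous failure preserved under "last_failed". A self-contained simulation of the same loop shape, to make the cadence concrete:

    import time

    def retry_sim(retries=3, interval=0):
        # mirrors the loop above with a module that always fails
        result, tries = {}, 0
        while True:
            res = {"failed": True}
            if tries > retries or not res.get("failed"):
                result.update(res)
                return result, tries + 1  # total attempts made
            result["last_failed"] = res
            tries += 1
            time.sleep(interval)

    assert retry_sim()[1] == 5  # 3 retries -> 5 attempts, 4 sleeps

The tests later in this diff set _module_retry_interval = 0 for the same reason.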
def get_var(self, *keys, **kwargs):
"""Get deeply nested values from task_vars.
@@ -171,8 +281,23 @@ class OpenShiftCheck(object):
'There is a bug in this check. While trying to convert variable \n'
' "{var}={value}"\n'
'the given converter cannot be used or failed unexpectedly:\n'
- '{error}'.format(var=".".join(keys), value=value, error=error)
- )
+ '{type}: {error}'.format(
+ var=".".join(keys),
+ value=value,
+ type=error.__class__.__name__,
+ error=error
+ ))
+
+ @staticmethod
+ def normalize(name_list):
+ """Return a clean list of names.
+
+ The input may be a comma-separated string or a sequence. Leading and
+ trailing whitespace characters are removed. Empty items are discarded.
+ """
+ if isinstance(name_list, string_types):
+ name_list = name_list.split(',')
+ return [name.strip() for name in name_list if name.strip()]
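Behavior sketch for normalize, callable on the class since it is a staticmethod (values illustrative):

    assert OpenShiftCheck.normalize("a, b,,c ") == ["a", "b", "c"]
    assert OpenShiftCheck.normalize(["a ", " b", ""]) == ["a", "b"]
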
@staticmethod
def get_major_minor_version(openshift_image_tag):
@@ -214,7 +339,9 @@ class OpenShiftCheck(object):
mount_point = os.path.dirname(mount_point)
try:
- return mount_for_path[mount_point]
+ mount = mount_for_path[mount_point]
+ self.register_log("mount point for " + path, mount)
+ return mount
except KeyError:
known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path))
raise OpenShiftCheckException(
@@ -242,7 +369,7 @@ def load_checks(path=None, subpkg=""):
modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
continue
- if name.endswith(".py") and not name.startswith(".") and name not in LOADER_EXCLUDES:
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
return modules
diff --git a/roles/openshift_health_checker/openshift_checks/diagnostics.py b/roles/openshift_health_checker/openshift_checks/diagnostics.py
new file mode 100644
index 000000000..1cfdc1129
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/diagnostics.py
@@ -0,0 +1,62 @@
+"""
+A check to run relevant diagnostics via `oc adm diagnostics`.
+"""
+
+import os
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+
+
+DIAGNOSTIC_LIST = (
+ "AggregatedLogging ClusterRegistry ClusterRoleBindings ClusterRoles "
+ "ClusterRouter DiagnosticPod NetworkCheck"
+).split()
+
+
+class DiagnosticCheck(OpenShiftCheck):
+ """A check to run relevant diagnostics via `oc adm diagnostics`."""
+
+ name = "diagnostics"
+ tags = ["health"]
+
+ def is_active(self):
+ return super(DiagnosticCheck, self).is_active() and self.is_first_master()
+
+ def run(self):
+ if self.exec_diagnostic("ConfigContexts"):
+ # only run the other diagnostics if that one succeeds (otherwise, all will fail)
+ diagnostics = self.get_var("openshift_check_diagnostics", default=DIAGNOSTIC_LIST)
+ for diagnostic in self.normalize(diagnostics):
+ self.exec_diagnostic(diagnostic)
+ return {}
+
+ def exec_diagnostic(self, diagnostic):
+ """
+ Execute an 'oc adm diagnostics' command on the remote host.
+ Raises OcNotFound or registers OcDiagFailed.
+ Returns True on success or False on failure (non-zero rc).
+ """
+ config_base = self.get_var("openshift.common.config_base")
+ args = {
+ "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
+ "cmd": "adm diagnostics",
+ "extra_args": [diagnostic],
+ }
+
+ result = self.execute_module("ocutil", args, save_as_name=diagnostic + ".failure.json")
+ self.register_file(diagnostic + ".txt", result['result'])
+ if result.get("failed"):
+ if result['result'] == '[Errno 2] No such file or directory':
+ raise OpenShiftCheckException(
+ "OcNotFound",
+ "This host is supposed to be a master but does not have the `oc` command where expected.\n"
+ "Has an installation been run on this host yet?"
+ )
+
+ self.register_failure(OpenShiftCheckException(
+ 'OcDiagFailed',
+ 'The {diag} diagnostic reported an error:\n'
+ '{error}'.format(diag=diagnostic, error=result['result'])
+ ))
+ return False
+ return True
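Which diagnostics run is driven by the openshift_check_diagnostics variable introduced here; a sketch of the two accepted forms, using names from DIAGNOSTIC_LIST above:

    # comma-separated string, as in the tests below...
    task_vars = {"openshift_check_diagnostics": "ClusterRouter,NetworkCheck"}
    # ...or a list; empty items are dropped by OpenShiftCheck.normalize
    task_vars = {"openshift_check_diagnostics": ["ClusterRouter", "NetworkCheck"]}
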
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index f302fd14b..cdf56e959 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -70,6 +70,10 @@ class DiskAvailability(OpenShiftCheck):
# If it is not a number, then it should be a nested dict.
pass
+ self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
+ if user_config:
+ self.register_log("user-configured thresholds", user_config)
+
# TODO: as suggested in
# https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
# maybe we could support checking disk availability in paths that are
@@ -113,10 +117,7 @@ class DiskAvailability(OpenShiftCheck):
'in your Ansible inventory, and lower the recommended disk space availability\n'
'if necessary for this upgrade.').format(config_bytes)
- return {
- 'failed': True,
- 'msg': msg,
- }
+ self.register_failure(msg)
return {}
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 866c74d7c..93a5973d4 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -32,7 +32,12 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# we use python-docker-py to check local docker for images, and skopeo
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
- skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false"
+ skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+
+ def __init__(self, *args, **kwargs):
+ super(DockerImageAvailability, self).__init__(*args, **kwargs)
+ # record whether we could reach a registry or not (and remember results)
+ self.reachable_registries = {}
def is_active(self):
"""Skip hosts with unsupported deployment types."""
@@ -64,15 +69,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
- return {
- "failed": True,
- "msg": (
- "One or more required Docker images are not available:\n {}\n"
- "Configured registries: {}\n"
- "Checked by: {}"
- ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries),
- self.skopeo_img_check_command),
- }
+ registries = [
+ reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
+ for reg in registries
+ ]
+ msg = (
+ "One or more required Docker images are not available:\n {}\n"
+ "Configured registries: {}\n"
+ "Checked by: {}"
+ ).format(
+ ",\n ".join(sorted(unavailable_images)),
+ ", ".join(registries),
+ self.skopeo_img_check_command
+ )
+
+ return dict(failed=True, msg=msg)
return {}
@@ -98,8 +109,6 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# containerized etcd may not have openshift_image_tag, see bz 1466622
image_tag = self.get_var("openshift_image_tag", default="latest")
image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
- if not image_info:
- return required
# template for images that run on top of OpenShift
image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
@@ -128,31 +137,31 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def local_images(self, images):
"""Filter a list of images and return those available locally."""
- return [
- image for image in images
- if self.is_image_local(image)
- ]
+ registries = self.known_docker_registries()
+ found_images = []
+ for image in images:
+ # docker could have the image name as-is or prefixed with any registry
+ imglist = [image] + [reg + "/" + image for reg in registries]
+ if self.is_image_local(imglist):
+ found_images.append(image)
+ return found_images
def is_image_local(self, image):
"""Check if image is already in local docker index."""
result = self.execute_module("docker_image_facts", {"name": image})
- if result.get("failed", False):
- return False
-
- return bool(result.get("images", []))
+ return bool(result.get("images")) and not result.get("failed")
def known_docker_registries(self):
"""Build a list of docker registries available according to inventory vars."""
- docker_facts = self.get_var("openshift", "docker")
- regs = set(docker_facts["additional_registries"])
+ regs = list(self.get_var("openshift_docker_additional_registries", default=[]))
deployment_type = self.get_var("openshift_deployment_type")
- if deployment_type == "origin":
- regs.update(["docker.io"])
- elif "enterprise" in deployment_type:
- regs.update(["registry.access.redhat.com"])
+ if deployment_type == "origin" and "docker.io" not in regs:
+ regs.append("docker.io")
+ elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
+ regs.append("registry.access.redhat.com")
- return list(regs)
+ return regs
def available_images(self, images, default_registries):
"""Search remotely for images. Returns: list of images found."""
@@ -165,17 +174,35 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Use Skopeo to determine if required image exists in known registry(s)."""
registries = default_registries
- # if image already includes a registry, only use that
+ # If image already includes a registry, only use that.
+ # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
+ # registry.access.redhat.com/rhel7 as if the registry were a namespace.
+ # It's not clear that there's any way to distinguish them, but fortunately
+ # the current set of images all look like [registry/]namespace/name[:version].
if image.count("/") > 1:
registry, image = image.split("/", 1)
registries = [registry]
for registry in registries:
- args = {
- "_raw_params": self.skopeo_img_check_command + " docker://{}/{}".format(registry, image)
- }
- result = self.execute_module("command", args)
+ if registry not in self.reachable_registries:
+ self.reachable_registries[registry] = self.connect_to_registry(registry)
+ if not self.reachable_registries[registry]:
+ continue
+
+ args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
+ result = self.execute_module_with_retries("command", args)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
+ if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
+ self.reachable_registries[registry] = False
return False
+
+ def connect_to_registry(self, registry):
+ """Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ # test a simple TCP connection
+ host, _, port = registry.partition(":")
+ port = port or 443
+ args = dict(host=host, port=port, state="started", timeout=30)
+ result = self.execute_module("wait_for", args)
+ return result.get("rc", 0) == 0 and not result.get("failed")
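How the registry string is split above (a sketch; hostnames hypothetical):

    host, _, port = "registry.example.com:5000".partition(":")
    assert (host, port or 443) == ("registry.example.com", "5000")
    host, _, port = "docker.io".partition(":")
    assert (host, port or 443) == ("docker.io", 443)  # port defaults to 443
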
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
index e5d93ff3f..79955cb2f 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -16,7 +16,7 @@ class EtcdVolume(OpenShiftCheck):
def is_active(self):
etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
- is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts
+ is_etcd_host = self.get_var("ansible_host") in etcd_hosts
return super(EtcdVolume, self).is_active() and is_etcd_host
def run(self):
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 7fc843fd7..986a01f38 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck):
for pod_name in pods_by_name.keys():
# Compare what each ES node reports as master and compare for split brain
get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
- master_name_str = self.exec_oc(get_master_cmd, [])
+ master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json")
master_names = (master_name_str or '').split(' ')
if len(master_names) > 1:
es_master_names.add(master_names[1])
@@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck):
# get ES cluster nodes
node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
- cluster_node_data = self.exec_oc(node_cmd, [])
+ cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
try:
cluster_nodes = json.loads(cluster_node_data)['nodes']
except (ValueError, KeyError):
@@ -142,7 +142,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
- cluster_health_data = self.exec_oc(cluster_health_cmd, [])
+ cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json')
try:
health_res = json.loads(cluster_health_data)
if not health_res or not health_res.get('status'):
@@ -171,7 +171,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
- disk_output = self.exec_oc(df_cmd, [])
+ disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index ecd8adb64..05ba73ca1 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -30,14 +30,6 @@ class LoggingCheck(OpenShiftCheck):
logging_deployed = self.get_var("openshift_hosted_logging_deploy", convert=bool, default=False)
return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master()
- def is_first_master(self):
- """Determine if running on first master. Returns: bool"""
- # Note: It would be nice to use membership in oo_first_master group, however for now it
- # seems best to avoid requiring that setup and just check this is the first master.
- hostname = self.get_var("ansible_ssh_host") or [None]
- masters = self.get_var("groups", "masters", default=None) or [None]
- return masters[0] == hostname
-
def run(self):
return {}
@@ -78,7 +70,7 @@ class LoggingCheck(OpenShiftCheck):
"""Returns the namespace in which logging is configured to deploy."""
return self.get_var("openshift_logging_namespace", default="logging")
- def exec_oc(self, cmd_str="", extra_args=None):
+ def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None):
"""
Execute an 'oc' command in the remote host.
Returns: output of command and namespace,
@@ -92,7 +84,7 @@ class LoggingCheck(OpenShiftCheck):
"extra_args": list(extra_args) if extra_args else [],
}
- result = self.execute_module("ocutil", args)
+ result = self.execute_module("ocutil", args, save_as_name=save_as_name)
if result.get("failed"):
if result['result'] == '[Errno 2] No such file or directory':
raise CouldNotUseOc(
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
index d781db649..cacdf4213 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -104,7 +104,7 @@ class LoggingIndexTime(LoggingCheck):
"https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
)
exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid)
- result = self.exec_oc(exec_cmd, [])
+ result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json")
try:
count = json.loads(result)["count"]
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index e9bae60a3..b90ebf6dd 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -36,7 +36,7 @@ class DockerHostMixin(object):
# NOTE: we would use the "package" module but it's actually an action plugin
# and it's not clear how to invoke one of those. This is about the same anyway:
- result = self.execute_module(
+ result = self.execute_module_with_retries(
self.get_var("ansible_pkg_mgr", default="yum"),
{"name": self.dependencies, "state": "present"},
)
@@ -49,5 +49,4 @@ class DockerHostMixin(object):
" {deps}\n{msg}"
).format(deps=',\n '.join(self.dependencies), msg=msg)
failed = result.get("failed", False) or result.get("rc", 0) != 0
- self.changed = result.get("changed", False)
return msg, failed
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a86180b00..21355c2f0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -26,7 +26,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
@staticmethod
def master_packages(rpm_prefix):
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index 1e9aecbe0..8464e8a5e 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
def run(self):
args = {"packages": []}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 0b795b6c4..d4aec3ed8 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -76,7 +76,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
],
}
- return self.execute_module("aos_version", args)
+ return self.execute_module_with_retries("aos_version", args)
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index c109ebd24..f14887303 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -3,10 +3,12 @@ import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
-from openshift_checks import OpenShiftCheckException
+from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
+from openshift_checks import OpenShiftCheckException, FileToSave
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
+ run_logs=None, run_files=None, changed=False, get_var_return=None):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""
_name, _tags = name, tags
@@ -14,12 +16,16 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
class FakeCheck(object):
name = _name
tags = _tags or []
- changed = False
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
- pass
+ def __init__(self, **_):
+ self.changed = False
+ self.failures = []
+ self.logs = run_logs or []
+ self.files_to_save = run_files or []
def is_active(self):
+ if isinstance(is_active, Exception):
+ raise is_active
return is_active
def run(self):
@@ -28,6 +34,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
raise run_exception
return run_return
+ def get_var(*args, **_):
+ return get_var_return
+
+ def register_failure(self, exc):
+ self.failures.append(OpenShiftCheckException(str(exc)))
+ return
+
return FakeCheck
@@ -98,23 +111,33 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
-def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
- checks = [fake_check(is_active=False)]
+@pytest.mark.parametrize('is_active, skipped_reason', [
+ (False, "Not active for this host"),
+ (Exception("borked"), "exception"),
+])
+def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert result['checks']['fake_check'].get('skipped')
+ assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
-def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('to_disable', [
+ 'fake_check',
+ ['fake_check', 'spam'],
+ '*,spam,eggs',
+])
+def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
checks = [fake_check('fake_check', is_active=True)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
- task_vars['openshift_disable_check'] = 'fake_check'
+ task_vars['openshift_disable_check'] = to_disable
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
@@ -123,10 +146,21 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
assert not skipped(result)
+def test_action_plugin_run_list_checks(monkeypatch):
+ task = FakeTask('openshift_health_check', {'checks': []})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
+ result = plugin.run()
+
+ assert failed(result, msg_has="Available checks")
+ assert not changed(result)
+ assert not skipped(result)
+
+
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
- check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ check_class = fake_check(run_return=check_return_value, run_files=[None])
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -140,7 +174,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -153,9 +187,9 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
- check_return_value = {'failed': True}
+ check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -166,24 +200,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
assert not skipped(result)
-def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('exc_class, expect_traceback', [
+ (OpenShiftCheckException, False),
+ (Exception, True),
+])
+def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
- run_exception = OpenShiftCheckException(exception_msg)
+ run_exception = exc_class(exception_msg)
check_class = fake_check(run_exception=run_exception, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)
+def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
+ check_class = fake_check(
+ run_return={},
+ run_logs=[('thing', 'note')],
+ run_files=[
+ FileToSave('save.file', 'contents', None),
+ FileToSave('save.file', 'duplicate', None),
+ FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
+ ],
+ )
+ task_vars['openshift_checks_output_dir'] = str(tmpdir)
+ check_class.get_var = lambda self, name, **_: task_vars.get(name)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ plugin.run(tmp=None, task_vars=task_vars)
+ assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
+ assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
+
+
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -249,3 +310,38 @@ def test_resolve_checks_failure(names, all_checks, words_in_exception):
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('give_output_dir, result, expect_file', [
+ (False, None, False),
+ (True, dict(content="c3BhbQo=", encoding="base64"), True),
+ (True, dict(content="encoding error", encoding="base64"), False),
+ (True, dict(content="spam", no_encoding=None), True),
+ (True, dict(failed=True, msg="could not slurp"), False),
+])
+def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
+ check = fake_check()()
+ check.execute_module = lambda *args, **_: result
+ copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
+ assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
+
+
+def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
+
+ class Spam(object):
+ def __str__(self):
+ raise Exception("break str")
+
+ test = {1: object(), 2: Spam()}
+ test[3] = test
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error writing" in test["output_files"]
+
+ output_dir = tmpdir.join("eggs")
+ output_dir.write("spam") # so now it's not a dir
+ write_to_output_file(str(output_dir), "somefile", "somedata")
+ assert "Could not write" in capsys.readouterr()[1]
+
+ monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error creating" in test["output_files"]
diff --git a/roles/openshift_health_checker/test/diagnostics_test.py b/roles/openshift_health_checker/test/diagnostics_test.py
new file mode 100644
index 000000000..800889fa7
--- /dev/null
+++ b/roles/openshift_health_checker/test/diagnostics_test.py
@@ -0,0 +1,50 @@
+import pytest
+
+from openshift_checks.diagnostics import DiagnosticCheck, OpenShiftCheckException
+
+
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(config_base="/etc/origin/")
+ )
+ )
+
+
+def test_module_succeeds(task_vars):
+ check = DiagnosticCheck(lambda *_: {"result": "success"}, task_vars)
+ check.is_first_master = lambda: True
+ assert check.is_active()
+ check.exec_diagnostic("spam")
+ assert not check.failures
+
+
+def test_oc_not_there(task_vars):
+ def exec_module(*_):
+ return {"failed": True, "result": "[Errno 2] No such file or directory"}
+
+ check = DiagnosticCheck(exec_module, task_vars)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.exec_diagnostic("spam")
+ assert excinfo.value.name == "OcNotFound"
+
+
+def test_module_fails(task_vars):
+ def exec_module(*_):
+ return {"failed": True, "result": "something broke"}
+
+ check = DiagnosticCheck(exec_module, task_vars)
+ check.exec_diagnostic("spam")
+ assert check.failures and check.failures[0].name == "OcDiagFailed"
+
+
+def test_names_executed(task_vars):
+ task_vars["openshift_check_diagnostics"] = diagnostics = "ConfigContexts,spam,,eggs"
+
+ def exec_module(module, args, *_):
+ assert "extra_args" in args
+ assert args["extra_args"][0] in diagnostics
+ return {"result": "success"}
+
+ DiagnosticCheck(exec_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index f4fd2dfed..9ae679b79 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -183,11 +183,12 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert result['failed']
+ assert check.failures
for chunk in 'below recommended'.split() + expect_chunks:
- assert chunk in result.get('msg', '')
+ assert chunk in str(check.failures[0])
@pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
@@ -237,11 +238,11 @@ def test_min_required_space_changes_with_upgrade_context(name, group_names, cont
)
check = DiskAvailability(fake_execute_module, task_vars)
- result = check.run()
+ check.run()
- assert result.get("failed", False) == failed
+ assert bool(check.failures) == failed
for word in extra_words:
- assert word in result.get('msg', '')
+ assert word in str(check.failures[0])
def fake_execute_module(*args):
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 8d0a53df9..c523ffd5c 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,11 +3,26 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(),
+ ),
+ openshift_deployment_type='origin',
+ openshift_image_tag='',
+ group_names=['nodes', 'masters'],
+ )
+
+
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
("origin", True, [], True),
("openshift-enterprise", True, [], True),
- ("enterprise", True, [], False),
- ("online", True, [], False),
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
@@ -15,12 +30,10 @@ from openshift_checks.docker_image_availability import DockerImageAvailability
("origin", False, ["nodes", "masters"], True),
("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_containerized, group_names, expect_active):
- task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
- openshift_deployment_type=deployment_type,
- group_names=group_names,
- )
+def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+ task_vars['openshift_deployment_type'] = deployment_type
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@@ -30,10 +43,10 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
(True, False),
(False, True),
])
-def test_all_images_available_locally(is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
- return {"changed": True}
+ return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
@@ -42,19 +55,9 @@ def test_all_images_available_locally(is_containerized, is_atomic):
'images': [module_args['name']],
}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=is_containerized,
- is_atomic=is_atomic,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['openshift']['common']['is_atomic'] = is_atomic
+ result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -63,30 +66,42 @@ def test_all_images_available_locally(is_containerized, is_atomic):
False,
True,
])
-def test_all_images_available_remotely(available_locally):
+def test_all_images_available_remotely(task_vars, available_locally):
def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
- return {'changed': False}
+ return {}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='v3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift_docker_additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+ task_vars['openshift_image_tag'] = 'v3.4'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ result = check.run()
assert not result.get('failed', False)
-def test_all_images_unavailable():
+def test_all_images_unavailable(task_vars):
+ def execute_module(module_name=None, *args):
+ if module_name == "wait_for":
+ return {}
+ elif module_name == "command":
+ return {'failed': True}
+
+ return {} # docker_image_facts failure
+
+ task_vars['openshift_docker_additional_registries'] = ["docker.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ task_vars['openshift_image_tag'] = 'latest'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
+
+ assert actual['failed']
+ assert "required Docker images are not available" in actual['msg']
+
+
+def test_no_known_registries():
def execute_module(module_name=None, *_):
if module_name == "command":
return {
@@ -97,22 +112,26 @@ def test_all_images_unavailable():
'changed': False,
}
- actual = DockerImageAvailability(execute_module, task_vars=dict(
+ def mock_known_docker_registries():
+ return []
+
+ dia = DockerImageAvailability(execute_module, task_vars=dict(
openshift=dict(
common=dict(
service_type='origin',
is_containerized=False,
is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io"]),
+ )
),
+ openshift_docker_additional_registries=["docker.io"],
openshift_deployment_type="openshift-enterprise",
openshift_image_tag='latest',
group_names=['nodes', 'masters'],
- )).run()
-
+ ))
+ dia.known_docker_registries = mock_known_docker_registries
+ actual = dia.run()
assert actual['failed']
- assert "required Docker images are not available" in actual['msg']
+ assert "Unable to retrieve any docker registries." in actual['msg']
@pytest.mark.parametrize("message,extra_words", [
@@ -125,62 +144,63 @@ def test_all_images_unavailable():
["dependencies can be installed via `yum`"]
),
])
-def test_skopeo_update_failure(message, extra_words):
+def test_skopeo_update_failure(task_vars, message, extra_words):
def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
- "changed": False,
}
- return {'changed': False}
+ return {}
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["unknown.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift_docker_additional_registries'] = ["unknown.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
-@pytest.mark.parametrize("deployment_type,registries", [
- ("origin", ["unknown.io"]),
- ("openshift-enterprise", ["registry.access.redhat.com"]),
- ("openshift-enterprise", []),
-])
-def test_registry_availability(deployment_type, registries):
+@pytest.mark.parametrize(
+ "image, registries, connection_test_failed, skopeo_failed, "
+ "expect_success, expect_registries_reached", [
+ (
+ "spam/eggs:v1", ["test.reg"],
+ True, True,
+ False,
+ {"test.reg": False},
+ ),
+ (
+ "spam/eggs:v1", ["test.reg"],
+ False, True,
+ False,
+ {"test.reg": True},
+ ),
+ (
+ "eggs.reg/spam/eggs:v1", ["test.reg"],
+ False, False,
+ True,
+ {"eggs.reg": True},
+ ),
+ ])
+def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
+ expect_success, expect_registries_reached):
def execute_module(module_name=None, *_):
- return {
- 'changed': False,
- }
+ if module_name == "wait_for":
+ return dict(msg="msg", failed=connection_test_failed)
+ elif module_name == "command":
+ return dict(msg="msg", failed=skopeo_failed)
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=registries),
- ),
- openshift_deployment_type=deployment_type,
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ check = DockerImageAvailability(execute_module, task_vars())
+ check._module_retry_interval = 0
- assert not actual.get("failed", False)
+ available = check.is_available_skopeo_image(image, registries)
+ assert available == expect_success
+ assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
@@ -257,7 +277,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_image_tag='vtest',
)
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
def test_containerized_etcd():
@@ -271,4 +291,4 @@ def test_containerized_etcd():
group_names=['etcd'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index 09bacd9ac..3fa5e8929 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -72,7 +72,7 @@ def test_check_elasticsearch():
assert_error_in_list('NoRunningPods', excinfo.value)
# canned oc responses to match so all the checks pass
- def exec_oc(cmd, args):
+ def exec_oc(cmd, args, **_):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
@@ -97,7 +97,7 @@ def test_check_running_es_pods():
def test_check_elasticsearch_masters():
pods = [plain_es_pod]
- check = canned_elasticsearch(task_vars_config_base, lambda *_: plain_es_pod['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str'])
assert not check.check_elasticsearch_masters(pods_by_name(pods))
@@ -117,7 +117,7 @@ def test_check_elasticsearch_masters():
])
def test_check_elasticsearch_masters_error(pods, expect_error):
test_pods = list(pods)
- check = canned_elasticsearch(task_vars_config_base, lambda *_: test_pods.pop(0)['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str'])
assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods)))
@@ -129,7 +129,7 @@ es_node_list = {
def test_check_elasticsearch_node_list():
- check = canned_elasticsearch(task_vars_config_base, lambda *_: json.dumps(es_node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list))
assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod]))
@@ -151,13 +151,13 @@ def test_check_elasticsearch_node_list():
),
])
def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error):
- check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list))
assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods)))
def test_check_elasticsearch_cluster_health():
test_health_data = [{"status": "green"}]
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert not check.check_es_cluster_health(pods_by_name([plain_es_pod]))
@@ -175,12 +175,12 @@ def test_check_elasticsearch_cluster_health():
])
def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error):
test_health_data = list(health_data)
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods)))
def test_check_elasticsearch_diskspace():
- check = canned_elasticsearch(exec_oc=lambda *_: 'IUse% Use%\n 3% 4%\n')
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n')
assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
@@ -199,5 +199,5 @@ def test_check_elasticsearch_diskspace():
),
])
def test_check_elasticsearch_diskspace_errors(disk_data, expect_error):
- check = canned_elasticsearch(exec_oc=lambda *_: disk_data)
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data)
assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])))
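Every canned exec_oc in this file (and in logging_index_time_test.py below) widens from lambda *_ to lambda *args, **_: a lambda that only collects positionals raises TypeError as soon as a caller adds a keyword argument. A quick illustration; save_as is a made-up keyword standing in for whatever the checks now pass:

    old = lambda *_: "canned oc output"
    new = lambda *args, **_: "canned oc output"

    old("_cat/master", [])                    # positional-only calls still work
    new("_cat/master", [], save_as="oc.txt")  # keywords are swallowed by **_
    try:
        old("_cat/master", [], save_as="oc.txt")
    except TypeError as err:
        print(err)  # <lambda>() got an unexpected keyword argument 'save_as'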
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index 1a1c190f6..59c703214 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -98,21 +98,19 @@ def test_oc_failure(problem, expect):
assert expect in str(excinfo)
-groups_with_first_master = dict(masters=['this-host', 'other-host'])
-groups_with_second_master = dict(masters=['other-host', 'this-host'])
-groups_not_a_master = dict(masters=['other-host'])
+groups_with_first_master = dict(oo_first_master=['this-host'])
+groups_not_a_master = dict(oo_first_master=['other-host'], oo_masters=['other-host'])
@pytest.mark.parametrize('groups, logging_deployed, is_active', [
(groups_with_first_master, True, True),
(groups_with_first_master, False, False),
(groups_not_a_master, True, False),
- (groups_with_second_master, True, False),
(groups_not_a_master, True, False),
])
def test_is_active(groups, logging_deployed, is_active):
task_vars = dict(
- ansible_ssh_host='this-host',
+ ansible_host='this-host',
groups=groups,
openshift_hosted_logging_deploy=logging_deployed,
)
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
index 22566b295..c48ade9b8 100644
--- a/roles/openshift_health_checker/test/logging_index_time_test.py
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -102,7 +102,7 @@ def test_with_running_pods():
),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
@@ -131,7 +131,7 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
)
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
with pytest.raises(OpenShiftCheckException) as error:
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
@@ -139,7 +139,7 @@ def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
def test_curl_kibana_with_uuid():
- check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404}))
check.generate_uuid = lambda: SAMPLE_UUID
assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
@@ -161,7 +161,7 @@ def test_curl_kibana_with_uuid():
),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.generate_uuid = lambda: SAMPLE_UUID
with pytest.raises(OpenShiftCheckException) as error:
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index 789784c77..bc0c3b26c 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -106,13 +106,40 @@ def test_get_var_convert(task_vars, keys, convert, expected):
assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected
-@pytest.mark.parametrize("keys, convert", [
- (("bar", "baz"), int),
- (("bar.baz"), float),
- (("foo"), "bogus"),
- (("foo"), lambda a, b: 1),
- (("foo"), lambda a: 1 / 0),
+def convert_oscexc(_):
+ raise OpenShiftCheckException("known failure")
+
+
+def convert_exc(_):
+ raise Exception("failure unknown")
+
+
+@pytest.mark.parametrize("keys, convert, expect_text", [
+ (("bar", "baz"), int, "Cannot convert"),
+ (("bar.baz",), float, "Cannot convert"),
+ (("foo",), "bogus", "TypeError"),
+ (("foo",), lambda a, b: 1, "TypeError"),
+ (("foo",), lambda a: 1 / 0, "ZeroDivisionError"),
+ (("foo",), convert_oscexc, "known failure"),
+ (("foo",), convert_exc, "failure unknown"),
])
-def test_get_var_convert_error(task_vars, keys, convert):
- with pytest.raises(OpenShiftCheckException):
+def test_get_var_convert_error(task_vars, keys, convert, expect_text):
+ with pytest.raises(OpenShiftCheckException) as excinfo:
dummy_check(task_vars).get_var(*keys, convert=convert)
+ assert expect_text in str(excinfo.value)
+
+
+def test_register(task_vars):
+ check = dummy_check(task_vars)
+
+ check.register_failure(OpenShiftCheckException("spam"))
+ assert "spam" in str(check.failures[0])
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.register_file("spam") # no file contents specified
+ assert "not specified" in str(excinfo.value)
+
+ # normally execute_module registers the result file; test disabling that
+ check._execute_module = lambda *args, **_: dict()
+ check.execute_module("eggs", module_args={}, register=False)
+ assert not check.files_to_save
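The expanded table pins down how get_var surfaces converter failures: conversion errors read "Cannot convert", an OpenShiftCheckException raised inside the converter keeps its own message, and any other exception is rewrapped with its type name. A sketch consistent with the expect_text column; the real get_var differs in structure:

    class OpenShiftCheckException(Exception):
        pass

    def convert_var(value, convert):
        # Assumed error handling, reverse-engineered from the test cases.
        try:
            return convert(value)
        except ValueError as err:
            raise OpenShiftCheckException("Cannot convert %r: %s" % (value, err))
        except OpenShiftCheckException:
            raise  # convert_oscexc's "known failure" text passes through as-is
        except Exception as err:
            raise OpenShiftCheckException("%s: %s" % (type(err).__name__, err))

    try:
        convert_var("sample", int)  # ValueError inside int()
    except OpenShiftCheckException as err:
        print(err)  # Cannot convert 'sample': invalid literal for int() ...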
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index e1bf29d2a..602f32989 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -50,7 +50,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
)
- return_value = object()
+ return_value = {} # note: check.execute_module modifies return hash contents
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 1fe648b75..b34e8fbfc 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -49,14 +49,14 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
])
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
- return return_value
+ return {'foo': return_value}
result = PackageAvailability(execute_module, task_vars).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 06489b0d7..85d3c9cab 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -2,14 +2,14 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
# empty list of packages means "generic check if 'yum update' will work"
assert module_args['packages'] == []
- return return_value
+ return {'foo': return_value}
result = PackageUpdate(execute_module).run()
- assert result is return_value
+ assert result['foo'] is return_value
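Here, in package_availability_test.py, and in ovs_version_test.py above, the object() sentinel moves one level down inside a dict because the check's execute_module path now annotates the dict it returns rather than handing it back untouched, so top-level identity asserts no longer hold. A sketch of the effect; the setdefault call is an assumed stand-in for whatever annotation actually happens:

    def run(execute_module):
        result = execute_module()            # the stub returns {'foo': sentinel}
        result.setdefault("changed", False)  # assumed annotation by the check
        return result

    sentinel = {}
    outcome = run(lambda: {"foo": sentinel})
    assert outcome["foo"] is sentinel    # nested identity survives
    assert outcome["changed"] is False   # but the dict itself was modified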
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index e871f39f0..8564cd4db 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -52,7 +52,7 @@ def test_invalid_openshift_release_format():
])
def test_package_version(openshift_release):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
assert module_name == 'aos_version'
@@ -66,7 +66,7 @@ def test_package_version(openshift_release):
check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin'))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
@@ -79,7 +79,7 @@ def test_package_version(openshift_release):
])
def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'aos_version'
@@ -93,7 +93,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py
index 0fc258133..69f27653c 100644
--- a/roles/openshift_health_checker/test/zz_failure_summary_test.py
+++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py
@@ -65,6 +65,21 @@ import pytest
},
],
),
+    # if a failure contains an unhashable value, it will not be deduplicated
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ ),
])
def test_deduplicate_failures(failures, deduplicated):
assert deduplicate_failures(failures) == deduplicated
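The added case records a deliberate property of deduplicate_failures: duplicates are detected through a hashable view of each failure, so a failure carrying an unhashable value (such as a nested dict) can never enter the seen-set and is passed through verbatim. A minimal sketch of that behavior; the real helper in zz_failure_summary.py may differ in detail:

    def deduplicate_failures(failures):
        seen = set()
        deduplicated = []
        for failure in failures:
            key = tuple(sorted(failure.items()))
            try:
                if key in seen:
                    continue
                seen.add(key)
            except TypeError:
                # unhashable value somewhere in the failure: keep it as-is
                pass
            deduplicated.append(failure)
        return deduplicated

    dup = {'host': 'master1', 'msg': {'unhashable': 'value'}}
    assert deduplicate_failures([dup, dup]) == [dup, dup]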
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 08c1d849e..c234c3740 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -1,14 +1,33 @@
---
-r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+##########
+# Common #
+##########
+openshift_hosted_infra_selector: "region=infra"
+r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
-r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+openshift_default_projects:
+ default:
+ default_node_selector: ''
+ logging:
+ default_node_selector: ''
+ openshift-infra:
+ default_node_selector: ''
-openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
-openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
+# openshift_additional_projects shares the same format as openshift_default_projects
+openshift_additional_projects: {}
-registry_volume_claim: 'registry-claim'
+openshift_config_base: "/etc/origin"
+openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master"
+openshift_cluster_domain: 'cluster.local'
+
+##########
+# Router #
+##########
+r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
openshift_hosted_router_edits:
- key: spec.strategy.rollingParams.intervalSeconds
@@ -36,20 +55,49 @@ openshift_hosted_routers:
certificate: "{{ openshift_hosted_router_certificate | default({}) }}"
openshift_hosted_router_certificate: {}
-openshift_hosted_registry_cert_expire_days: 730
openshift_hosted_router_create_certificate: True
r_openshift_hosted_router_os_firewall_deny: []
r_openshift_hosted_router_os_firewall_allow: []
+############
+# Registry #
+############
+
+r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_hosted_registry_name: docker-registry
+openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+registry_volume_claim: 'registry-claim'
+openshift_hosted_registry_cert_expire_days: 730
+
r_openshift_hosted_registry_os_firewall_deny: []
r_openshift_hosted_registry_os_firewall_allow:
- service: Docker Registry Port
port: 5000/tcp
cond: "{{ r_openshift_hosted_use_calico }}"
-# NOTE
-# r_openshift_hosted_use_calico_default may be defined external to this role.
-# openshift_use_calico, if defined, may affect other roles or play behavior.
-r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
-r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
+openshift_hosted_registry_serviceaccount: registry
+openshift_hosted_registry_volumes: []
+openshift_hosted_registry_env_vars: {}
+
+# These edits are specified only to prevent 'changed' on rerun
+openshift_hosted_registry_edits:
+- key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+
+openshift_hosted_registry_force:
+- False
+
+openshift_push_via_dns: False
+
+# NOTE: setting openshift_docker_hosted_registry_insecure may affect other roles
+openshift_hosted_docker_registry_insecure_default: "{{ openshift_docker_hosted_registry_insecure | default(False) }}"
+openshift_hosted_docker_registry_insecure: "{{ openshift_hosted_docker_registry_insecure_default }}"
diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
index 7f41529ac..7f41529ac 100644
--- a/roles/openshift_hosted/filter_plugins/filters.py
+++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 28fd396d6..1d70ef7eb 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -12,7 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_cli
- role: openshift_hosted_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_hosted/tasks/create_projects.yml b/roles/openshift_hosted/tasks/create_projects.yml
new file mode 100644
index 000000000..1b25d0c64
--- /dev/null
+++ b/roles/openshift_hosted/tasks/create_projects.yml
@@ -0,0 +1,14 @@
+---
+- name: Create default projects
+ oc_project:
+ name: "{{ item.key }}"
+ node_selector:
+ - "{{ item.value.default_node_selector }}"
+ with_dict: "{{ openshift_default_projects }}"
+
+- name: Create additional projects
+ oc_project:
+ name: "{{ item.key }}"
+ node_selector:
+ - "{{ item.value.default_node_selector }}"
+ with_dict: "{{ openshift_additional_projects }}"
diff --git a/roles/openshift_hosted/tasks/router/firewall.yml b/roles/openshift_hosted/tasks/firewall.yml
index ff90f3372..1eb2c92c8 100644
--- a/roles/openshift_hosted/tasks/router/firewall.yml
+++ b/roles/openshift_hosted/tasks/firewall.yml
@@ -8,7 +8,7 @@
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ with_items: "{{ l_openshift_hosted_fw_allow }}"
- name: Remove iptables rules
os_firewall_manage_iptables:
@@ -17,9 +17,9 @@
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}"
+ with_items: "{{ l_openshift_hosted_fw_deny }}"
-- when: r_openshift_hosted_router_firewall_enabled | bool and r_openshift_hosted_router_use_firewalld | bool
+- when: l_openshift_hosted_firewall_enabled | bool and l_openshift_hosted_use_firewalld | bool
block:
- name: Add firewalld allow rules
firewalld:
@@ -28,7 +28,7 @@
immediate: true
state: enabled
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ with_items: "{{ l_openshift_hosted_fw_allow }}"
- name: Remove firewalld allow rules
firewalld:
@@ -37,4 +37,4 @@
immediate: true
state: disabled
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}"
+ with_items: "{{ l_openshift_hosted_fw_deny }}"
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index 6efe2f63c..d306adf42 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,13 +1,9 @@
---
-- name: Create projects
- oc_project:
- name: "{{ item.key }}"
- node_selector:
- - "{{ item.value.default_node_selector }}"
- with_dict: "{{ openshift_projects }}"
-
-- include: router/router.yml
- when: openshift_hosted_manage_router | default(true) | bool
-
-- include: registry/registry.yml
- when: openshift_hosted_manage_registry | default(true) | bool
+# This role is intended to be used with include_role.
+# include_role:
+# name: openshift_hosted
+# tasks_from: "{{ item }}"
+# with_items:
+# - create_projects.yml
+# - router.yml
+# - registry.yml
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index d73c290ff..f1aa9c5a8 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -1,7 +1,11 @@
---
- name: setup firewall
include: firewall.yml
- static: yes
+ vars:
+ l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}"
+ l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}"
+ l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
+ l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
- when: openshift.hosted.registry.replicas | default(none) is none
block:
@@ -36,30 +40,14 @@
- name: set openshift_hosted facts
set_fact:
openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
- openshift_hosted_registry_name: docker-registry
- openshift_hosted_registry_serviceaccount: registry
openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}"
- openshift_hosted_registry_volumes: []
- openshift_hosted_registry_env_vars: {}
- openshift_hosted_registry_edits:
- # These edits are being specified only to prevent 'changed' on rerun
- - key: spec.strategy.rollingParams
- value:
- intervalSeconds: 1
- maxSurge: "25%"
- maxUnavailable: "25%"
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- action: put
- openshift_hosted_registry_force:
- - False
- name: Update registry environment variables when pushing via dns
set_fact:
openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
- when: openshift_push_via_dns | default(false) | bool
+ when: openshift_push_via_dns | bool
- name: Update registry proxy settings for dc/docker-registry
set_fact:
@@ -137,36 +125,17 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
-- when: openshift_hosted_registry_wait
- block:
- - name: Ensure OpenShift registry correctly rolls out (best-effort today)
- command: |
- oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig
- async: 600
- poll: 15
- failed_when: false
-
- - name: Determine the latest version of the OpenShift registry deployment
- command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .status.latestVersion }'
- register: openshift_hosted_registry_latest_version
-
- - name: Sanity-check that the OpenShift registry rolled out correctly
- command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
- register: openshift_hosted_registry_rc_phase
- until: "'Running' not in openshift_hosted_registry_rc_phase.stdout"
- delay: 15
- retries: 40
- failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout"
+- name: setup registry list
+ set_fact:
+ r_openshift_hosted_registry_list:
+ - name: "{{ openshift_hosted_registry_name }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+
+- name: Wait for pod (Registry)
+ include: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
+ l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
- include: storage/glusterfs.yml
when:
diff --git a/roles/openshift_hosted/tasks/registry/firewall.yml b/roles/openshift_hosted/tasks/registry/firewall.yml
deleted file mode 100644
index 775b7d6d7..000000000
--- a/roles/openshift_hosted/tasks/registry/firewall.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- when: r_openshift_hosted_registry_firewall_enabled | bool and not r_openshift_hosted_registry_use_firewalld | bool
- block:
- - name: Add iptables allow rules
- os_firewall_manage_iptables:
- name: "{{ item.service }}"
- action: add
- protocol: "{{ item.port.split('/')[1] }}"
- port: "{{ item.port.split('/')[0] }}"
- when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
-
- - name: Remove iptables rules
- os_firewall_manage_iptables:
- name: "{{ item.service }}"
- action: remove
- protocol: "{{ item.port.split('/')[1] }}"
- port: "{{ item.port.split('/')[0] }}"
- when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
-
-- when: r_openshift_hosted_registry_firewall_enabled | bool and r_openshift_hosted_registry_use_firewalld | bool
- block:
- - name: Add firewalld allow rules
- firewalld:
- port: "{{ item.port }}"
- permanent: true
- immediate: true
- state: enabled
- when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
-
- - name: Remove firewalld allow rules
- firewalld:
- port: "{{ item.port }}"
- permanent: true
- immediate: true
- state: disabled
- when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router.yml
index 68ec7233e..2aceef9e4 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -1,7 +1,11 @@
---
- name: setup firewall
include: firewall.yml
- static: yes
+ vars:
+ l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_router_firewall_enabled }}"
+ l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}"
+ l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_router_os_firewall_deny }}"
- name: Retrieve list of openshift nodes matching router selector
oc_obj:
@@ -48,9 +52,9 @@
certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
-
- # End Block
- when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {}
+ when:
+ - openshift_hosted_router_create_certificate | bool
+ - openshift_hosted_router_certificate == {}
- name: Create the router service account(s)
oc_serviceaccount:
@@ -82,7 +86,7 @@
replicas: "{{ item.replicas }}"
namespace: "{{ item.namespace | default('default') }}"
# This option is not yet implemented
- # force_subdomain: "{{ openshift.hosted.router.force_subdomain | default(none) }}"
+ # force_subdomain: "{{ openshift_hosted_router_force_subdomain | default(none) }}"
service_account: "{{ item.serviceaccount | default('router') }}"
selector: "{{ item.selector | default(none) }}"
images: "{{ item.images | default(omit) }}"
@@ -94,38 +98,8 @@
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-- when: openshift_hosted_router_wait
- block:
- - name: Ensure OpenShift router correctly rolls out (best-effort today)
- command: |
- {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
- --namespace {{ item.namespace | default('default') }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig
- async: 600
- poll: 15
- with_items: "{{ openshift_hosted_routers }}"
- failed_when: false
-
- - name: Determine the latest version of the OpenShift router deployment
- command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
- --namespace {{ item.namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .status.latestVersion }'
- register: openshift_hosted_routers_latest_version
- with_items: "{{ openshift_hosted_routers }}"
-
- - name: Poll for OpenShift router deployment success
- command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
- --namespace {{ item.0.namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
- register: openshift_hosted_router_rc_phase
- until: "'Running' not in openshift_hosted_router_rc_phase.stdout"
- delay: 15
- retries: 40
- failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout"
- with_together:
- - "{{ openshift_hosted_routers }}"
- - "{{ openshift_hosted_routers_latest_version.results }}"
+- name: Wait for pod (Routers)
+ include: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
+ l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/secure.yml
index a8a6f6fc8..0da8ac8a7 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/secure.yml
@@ -1,7 +1,7 @@
---
- name: Configure facts for docker-registry
set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}"
+ openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
@@ -38,11 +38,11 @@
- "{{ docker_registry_service.results.clusterip }}"
- "{{ docker_registry_route.results[0].spec.host }}"
- "{{ openshift_hosted_registry_name }}.default.svc"
- - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}"
+ - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}"
- "{{ openshift_hosted_registry_routehost }}"
cert: "{{ docker_registry_cert_path }}"
key: "{{ docker_registry_key_path }}"
- expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool else omit }}"
+ expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift_deployment_type) | bool else omit }}"
register: registry_self_cert
when: docker_registry_self_signed
diff --git a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml b/roles/openshift_hosted/tasks/secure/passthrough.yml
index 5b44fda10..5b44fda10 100644
--- a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml
+++ b/roles/openshift_hosted/tasks/secure/passthrough.yml
diff --git a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml b/roles/openshift_hosted/tasks/secure/reencrypt.yml
index 48e5b0fba..48e5b0fba 100644
--- a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml
+++ b/roles/openshift_hosted/tasks/secure/reencrypt.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index c2954fde1..c2954fde1 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml
index 8553a8098..8553a8098 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/storage/object_storage.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2 b/roles/openshift_hosted/tasks/storage/registry_config.j2
index f3e82ad4f..f3e82ad4f 120000
--- a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2
+++ b/roles/openshift_hosted/tasks/storage/registry_config.j2
diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml
index 318969885..8e905d905 100644
--- a/roles/openshift_hosted/tasks/registry/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/storage/s3.yml
@@ -3,7 +3,7 @@
assert:
that:
- openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- - openshift.hosted.registry.storage.s3.region | default(none) is not none
+      - openshift.hosted.registry.storage.s3.region | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
openshift_hosted_registry_storage_s3_bucket
diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml
new file mode 100644
index 000000000..056c79334
--- /dev/null
+++ b/roles/openshift_hosted/tasks/wait_for_pod.yml
@@ -0,0 +1,36 @@
+---
+- when: l_openshift_hosted_wait_for_pod | default(False) | bool
+ block:
+ - name: Ensure OpenShift pod correctly rolls out (best-effort today)
+ command: |
+ {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace | default('default') }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig
+ async: 600
+ poll: 15
+ with_items: "{{ l_openshift_hosted_wfp_items }}"
+ failed_when: false
+
+ - name: Determine the latest version of the OpenShift pod deployment
+ command: |
+ {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig \
+ -o jsonpath='{ .status.latestVersion }'
+ register: l_openshift_hosted_wfp_latest_version
+ with_items: "{{ l_openshift_hosted_wfp_items }}"
+
+ - name: Poll for OpenShift pod deployment success
+ command: |
+ {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ --namespace {{ item.0.namespace }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig \
+ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
+ register: openshift_hosted_wfp_rc_phase
+ until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout"
+ delay: 15
+ retries: 40
+ failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout"
+ with_together:
+ - "{{ l_openshift_hosted_wfp_items }}"
+ - "{{ l_openshift_hosted_wfp_latest_version.results }}"
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index 61da452de..eae8b328e 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -70,10 +70,8 @@ auth:
openshift:
realm: openshift
middleware:
-{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
registry:
- name: openshift
-{% endif %}
repository:
- name: openshift
options:
@@ -87,7 +85,7 @@ middleware:
baseurl: {{ openshift_hosted_registry_storage_s3_cloudfront_baseurl }}
privatekey: /etc/origin/cloudfront.pem
keypairid: {{ openshift_hosted_registry_storage_s3_cloudfront_keypairid }}
-{% elif openshift.common.version_gte_3_3_or_1_3 | bool %}
+{% else %}
storage:
- name: openshift
{% endif -%}
diff --git a/roles/openshift_hosted/vars/main.yml b/roles/openshift_hosted/vars/main.yml
index 0821d0e7e..0e756d9e1 100644
--- a/roles/openshift_hosted/vars/main.yml
+++ b/roles/openshift_hosted/vars/main.yml
@@ -1,13 +1,2 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
registry_config_secret_name: registry-config
-
-openshift_default_projects:
- default:
- default_node_selector: ''
- logging:
- default_node_selector: ''
- openshift-infra:
- default_node_selector: ''
-
-openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts(openshift_default_projects) }}"
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
index 631bf3e2a..53d1a8bc7 100644
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -8,9 +8,10 @@
- name: Set hosted facts
openshift_facts:
- role: hosted
+ role: "{{ item }}"
openshift_env: "{{ hostvars
| oo_merge_hostvars(vars, inventory_hostname)
| oo_openshift_env }}"
openshift_env_structures:
- 'openshift.hosted.router.*'
+ with_items: [hosted, logging, loggingops, metrics]
diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md
deleted file mode 100644
index 680303853..000000000
--- a/roles/openshift_hosted_logging/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-###Required vars:
-
-- openshift_hosted_logging_hostname: kibana.example.com
-- openshift_hosted_logging_elasticsearch_cluster_size: 1
-- openshift_hosted_logging_master_public_url: https://localhost:8443
-
-###Optional vars:
-- openshift_hosted_logging_image_prefix: logging image prefix. No default. Use this to specify an alternate image repository e.g. my.private.repo:5000/private_openshift/
-- target_registry: DEPRECATED - use openshift_hosted_logging_image_prefix instead
-- openshift_hosted_logging_image_version: logging image version suffix. Defaults to the current version of the deployed software.
-- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key
-- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3
-- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying.
-- openshift_hosted_logging_elasticsearch_instance_ram: Amount of RAM to reserve per ElasticSearch instance (e.g. 1024M, 2G). Defaults to 8GiB; must be at least 512M (Ref.: [ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html\#\_memory).
-- openshift_hosted_logging_elasticsearch_pvc_size: Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead.
-- openshift_hosted_logging_elasticsearch_pvc_prefix: Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size `openshift_hosted_logging_elasticsearch_pvc_size`.
-- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to have created PersistentVolumeClaims annotated such that their backing storage can be dynamically provisioned (if that is available for your cluster).
-- openshift_hosted_logging_elasticsearch_storage_group: Number of a supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534).
-- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should be use (label=value)
-- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to.
-- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true".
-- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should be use (label=value)
-- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should be use (label=value)
-- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs.
-- openshift_hosted_logging_use_journal: *DEPRECATED - DO NOT USE*
-- openshift_hosted_logging_journal_source: By default, if this param is unset or empty, logging will use `/var/log/journal` if it exists, or `/run/log/journal` if not. You can use this param to force logging to use a different location.
-- openshift_hosted_logging_journal_read_from_head: Set to `true` to have fluentd read from the beginning of the journal, to get historical log data. Default is `false`. *WARNING* Using `true` may take several minutes or even hours, depending on the size of the journal, until any new records show up in Elasticsearch, and will cause fluentd to consume a lot of CPU and RAM resources.
-
-When `openshift_hosted_logging_enable_ops_cluster` is `True`, there are some
-additional vars. These work the same as above for their non-ops counterparts,
-but apply to the OPS cluster instance:
-- openshift_hosted_logging_ops_hostname: kibana-ops.example.com
-- openshift_hosted_logging_elasticsearch_ops_cluster_size
-- openshift_hosted_logging_elasticsearch_ops_instance_ram
-- openshift_hosted_logging_elasticsearch_ops_pvc_size
-- openshift_hosted_logging_elasticsearch_ops_pvc_prefix
-- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic
-- openshift_hosted_logging_elasticsearch_ops_nodeselector
-- openshift_hosted_logging_kibana_ops_nodeselector
-- openshift_hosted_logging_curator_ops_nodeselector
diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml
deleted file mode 100644
index a01f24df8..000000000
--- a/roles/openshift_hosted_logging/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml
deleted file mode 100644
index d7e83fe9a..000000000
--- a/roles/openshift_hosted_logging/handlers/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
deleted file mode 100644
index ab07a77c1..000000000
--- a/roles/openshift_hosted_logging/meta/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
deleted file mode 100644
index 70b0d67a4..000000000
--- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
-- name: "Checking for logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
- register: logging_project
- failed_when: "'FAILED' in logging_project.stderr"
-
-- name: "Changing projects"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
-
-
-- name: "Cleanup any previous logging infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
- with_items:
- - kibana
- - fluentd
- - elasticsearch
- ignore_errors: yes
-
-- name: "Cleanup existing support infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
- ignore_errors: yes
-
-- name: "Cleanup existing secrets"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
- ignore_errors: yes
- register: clean_result
- failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
-
-- name: "Cleanup existing logging deployers"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
-
-
-- name: "Cleanup logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
-
-
-- name: "Remove deployer template"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
- register: delete_output
- failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
-
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
-
-- debug: msg="Success!"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
deleted file mode 100644
index 78b624109..000000000
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
----
-- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
- when: target_registry is defined and target_registry
-
-- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
- when: "openshift_hosted_logging_hostname is not defined or
- openshift_hosted_logging_elasticsearch_cluster_size is not defined or
- openshift_hosted_logging_master_public_url is not defined"
-
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
-- name: "Check for logging project already exists"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
- register: logging_project_result
- ignore_errors: True
-
-- name: "Create logging project"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
- when: logging_project_result.stdout == ""
-
-- name: "Changing projects"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
-
-- name: "Creating logging deployer secret"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
- register: secret_output
- failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
-
-- name: "Create templates for logging accounts and the deployer"
- command: >
- {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
- -f {{ hosted_base }}/logging-deployer.yaml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n logging
- register: logging_import_template
- failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
- changed_when: "'created' in logging_import_template.stdout"
-
-- name: "Process the logging accounts template"
- shell: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
- register: process_deployer_accounts
- failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
-
-- name: "Set permissions for logging-deployer service account"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
- register: permiss_output
- failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
-
-- name: "Set permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
-
-- name: "Set additional permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
- add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
-
-- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user rolebinding-reader \
- system:serviceaccount:logging:aggregated-logging-elasticsearch
- register: rolebinding_reader_output
- failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr
-
-- name: "Create ConfigMap for deployer parameters"
- command: >
- {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
- register: deployer_configmap_output
- failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
-
-- name: "Process the deployer template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
- register: process_deployer
- failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
-
-- name: "Wait for image pull and deployer pod"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 15
-
-- name: "Process imagestream template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: process_is
- failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
-
-- name: "Set insecured registry"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-
-- name: "Wait for imagestreams to become available"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 and 'not found' not in result.stderr
- retries: 20
- delay: 5
-
-- name: "Wait for component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es
- - kibana
- - curator
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- name: "Wait for ops component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es-ops
- - kibana-ops
- - curator-ops
- when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- name: "Wait for fluentd DaemonSet to exist"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 5
-
-- name: "Deploy fluentd by labeling the node"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
-
-- name: "Wait for fluentd to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- include: update_master_config.yaml
-
-- debug:
- msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_hosted_logging/tasks/main.yaml b/roles/openshift_hosted_logging/tasks/main.yaml
deleted file mode 100644
index 42568597a..000000000
--- a/roles/openshift_hosted_logging/tasks/main.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cleanup logging deployment
- include: "{{ role_path }}/tasks/cleanup_logging.yaml"
- when: openshift_hosted_logging_cleanup | default(false) | bool
-
-- name: Deploy logging
- include: "{{ role_path }}/tasks/deploy_logging.yaml"
- when: not openshift_hosted_logging_cleanup | default(false) | bool
diff --git a/roles/openshift_hosted_logging/tasks/update_master_config.yaml b/roles/openshift_hosted_logging/tasks/update_master_config.yaml
deleted file mode 100644
index 1122e059c..000000000
--- a/roles/openshift_hosted_logging/tasks/update_master_config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Adding Kibana route information to loggingPublicURL
- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: assetConfig.loggingPublicURL
- yaml_value: "https://{{ logging_hostname }}"
- notify: restart master
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
deleted file mode 100644
index 4b350b244..000000000
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
-ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
-iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"
-oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}"
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname | default(none) is not none else '' }}"
-kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname | default(none) is not none else '' }}"
-pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url | default(none) is not none else '' }}"
-es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size | default(none) is not none else '' }}"
-es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size | default(none) is not none else '' }}"
-es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram | default(none) is not none else '' }}"
-es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram | default(none) is not none else '' }}"
-es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size | default(none) is not none else '' }}"
-es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size | default(none) is not none else '' }}"
-es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix | default(none) is not none else '' }}"
-es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default(none) is not none else '' }}"
-es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic | default(none) is not none else '' }}"
-es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(none) is not none else '' }}"
-es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group | default(none) is not none else '' }}"
-es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector | default(none) is not none else '' }}"
-es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector | default(none) is not none else '' }}"
-fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector_label | quote if openshift_hosted_logging_fluentd_nodeselector_label | default(none) is not none else 'logging-infra-fluentd=true' }}"
-kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector | default(none) is not none else '' }}"
-kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector | default(none) is not none else '' }}"
-cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}"
-cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}"
-ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}"
-journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}"
-journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}"
-ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}"
-deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}"
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
index 11478263c..72754df2e 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
index 0e3d006a7..298f8039e 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
index 28feac4e6..dace26793 100644
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 8bf98ba41..f821efd6b 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index bbaf76c17..019d836fe 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index f283261c4..829c78728 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -12,13 +12,13 @@ generation for Elasticsearch (it uses JKS) as well as openssl to sign certificat
As part of the installation, it is recommended that you add the Fluentd node selector label
to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels).
-###Required vars:
+### Required vars:
- `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging.
When `openshift_logging_install_logging` is set to `False` the `openshift_logging` role will uninstall Aggregated Logging.
-###Optional vars:
+### Optional vars:
- `openshift_logging_purge_logging`: When `openshift_logging_install_logging` is set to 'False' to trigger uninstallation and `openshift_logging_purge_logging` is set to 'True', it will completely and irreversibly remove all logging persistent data including PVC. Defaults to 'False'.
- `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.
- `openshift_logging_curator_image_prefix`: Setting the image prefix for Curator image. Defaults to `openshift_logging_image_prefix`.
@@ -62,7 +62,6 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
-- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'.
- `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
@@ -91,6 +90,12 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_es_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'.
- `openshift_logging_es_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.
+- `openshift_logging_install_eventrouter`: Coupled with `openshift_logging_install_logging`. When both are 'True', eventrouter will be installed. When both are 'False', eventrouter will be uninstalled.
+Any other combination leaves the eventrouter untouched.
+
+Detailed eventrouter configuration can be found in `roles/openshift_logging_eventrouter/README.md`.
+
When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the
same as above for their non-ops counterparts, but apply to the OPS cluster instance:
- `openshift_logging_es_ops_host`: logging-es-ops
@@ -164,7 +169,7 @@ Elasticsearch OPS too, if using an OPS cluster:
send the raw logs to mux for processing. We do not currently recommend using
this mode, and ansible will warn you about this.
- `openshift_logging_mux_hostname`: Default is "mux." +
- `openshift_master_default_subdomain`. This is the hostname *external*_
+ `openshift_master_default_subdomain`. This is the hostname *external*
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
@@ -194,3 +199,26 @@ Elasticsearch OPS too, if using an OPS cluster:
Defaults to 'logging-mux'.
- `openshift_logging_mux_file_buffer_storage_group`: The storage group used for Mux.
Defaults to '65534'.
+
+### remote syslog forwarding
+- `openshift_logging_fluentd_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false`
+- `openshift_logging_fluentd_remote_syslog_host`: Required, hostname or IP of remote syslog server
+- `openshift_logging_fluentd_remote_syslog_port`: Port of remote syslog server, defaults to `514`
+- `openshift_logging_fluentd_remote_syslog_severity`: Syslog severity level, defaults to `debug`
+- `openshift_logging_fluentd_remote_syslog_facility`: Syslog facility, defaults to `local0`
+- `openshift_logging_fluentd_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty)
+- `openshift_logging_fluentd_remote_syslog_tag_key`: If a string is specified, use this field from the record to set the key field on the syslog message
+- `openshift_logging_fluentd_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false`
+- `openshift_logging_fluentd_remote_syslog_payload_key`: If a string is specified, use this field from the record as the payload on the syslog message
+
+The corresponding openshift\_logging\_mux\_* parameters are below.
+
+- `openshift_logging_mux_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false`
+- `openshift_logging_mux_remote_syslog_host`: Required, hostname or IP of remote syslog server
+- `openshift_logging_mux_remote_syslog_port`: Port of remote syslog server, defaults to `514`
+- `openshift_logging_mux_remote_syslog_severity`: Syslog severity level, defaults to `debug`
+- `openshift_logging_mux_remote_syslog_facility`: Syslog facility, defaults to `local0`
+- `openshift_logging_mux_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty)
+- `openshift_logging_mux_remote_syslog_tag_key`: If a string is specified, use this field from the record to set the key field on the syslog message
+- `openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false`
+- `openshift_logging_mux_remote_syslog_payload_key`: If a string is specified, use this field from the record as the payload on the syslog message
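As a quick illustration of the Fluentd variables above, here is a minimal, hypothetical inventory sketch; the hostname `syslog.example.com` and the overridden port are placeholders, not defaults shipped by this role:

```yaml
# group_vars sketch: forward container logs to an external syslog server
openshift_logging_fluentd_remote_syslog: true
openshift_logging_fluentd_remote_syslog_host: syslog.example.com  # required once forwarding is enabled
openshift_logging_fluentd_remote_syslog_port: 1514                # default is 514
openshift_logging_fluentd_remote_syslog_severity: info            # default is debug
```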
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 716f0e002..5574a1446 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -1,15 +1,17 @@
---
-openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}"
+openshift_logging_use_ops: False
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
-openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_master_public_url: "{{ 'https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true)) }}"
openshift_logging_namespace: logging
openshift_logging_nodeselector: null
openshift_logging_labels: {}
openshift_logging_label_key: ""
openshift_logging_label_value: ""
-openshift_logging_install_logging: True
+openshift_logging_install_logging: False
+openshift_logging_uninstall_logging: False
+
openshift_logging_purge_logging: False
-openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_image_pull_secret: ""
openshift_logging_curator_default_days: 30
openshift_logging_curator_run_hour: 0
@@ -19,23 +21,23 @@ openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
openshift_logging_curator_cpu_limit: 100m
openshift_logging_curator_memory_limit: null
-openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_curator_nodeselector: {}
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
-openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_curator_ops_nodeselector: {}
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
-openshift_logging_kibana_proxy_memory_limit: 96Mi
+openshift_logging_kibana_proxy_memory_limit: 256Mi
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
-openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_kibana_nodeselector: {}
+openshift_logging_kibana_ops_nodeselector: {}
#The absolute path on the control node to the cert file to use
#for the public facing kibana certs
@@ -49,12 +51,12 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
-openshift_logging_kibana_ops_proxy_memory_limit: 96Mi
+openshift_logging_kibana_ops_proxy_memory_limit: 256Mi
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -69,12 +71,11 @@ openshift_logging_kibana_ops_key: ""
#for the public facing ops kibana certs
openshift_logging_kibana_ops_ca: ""
-openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
-openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
-openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+openshift_logging_fluentd_journal_source: ""
+openshift_logging_fluentd_journal_read_from_head: ""
openshift_logging_fluentd_hosts: ['--all']
openshift_logging_fluentd_buffer_queue_limit: 1024
openshift_logging_fluentd_buffer_size_limit: 1m
@@ -84,18 +85,18 @@ openshift_logging_es_port: 9200
openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
-openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
+openshift_logging_es_cluster_size: 1
openshift_logging_es_cpu_limit: 1000m
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
-openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}"
-openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
-openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
-openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
+openshift_logging_es_memory_limit: "8Gi"
+openshift_logging_es_pv_selector: "{{ openshift_logging_storage_labels | default('') }}"
+openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynamic | default(False) }}"
+openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}"
+openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
openshift_logging_es_recover_after_time: 5m
-openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
-openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_es_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_nodeselector: {}
# openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
openshift_logging_es_config: {}
openshift_logging_es_number_of_shards: 1
@@ -125,16 +126,16 @@ openshift_logging_es_ops_port: 9200
openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
-openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
+openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: 1000m
-openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}"
-openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
-openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
-openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
+openshift_logging_es_ops_memory_limit: "8Gi"
+openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}"
+openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
+openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}"
+openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
openshift_logging_es_ops_recover_after_time: 5m
-openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
-openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_es_ops_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_ops_nodeselector: {}
# for exposing es-ops to external (outside of the cluster) clients
openshift_logging_es_ops_allow_external: False
@@ -153,7 +154,7 @@ openshift_logging_es_ops_key: ""
openshift_logging_es_ops_ca_ext: ""
# storage related defaults
-openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+openshift_logging_storage_access_modes: ['ReadWriteOnce']
# mux - secure_forward listener service
openshift_logging_mux_allow_external: False
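These defaults no longer fall back to the `openshift_hosted_logging_*` variables, so inventories still using the old names must migrate to the `openshift_logging_*` equivalents. A hedged before/after sketch (the cluster size and RAM values are illustrative):

```yaml
# Before: deprecated names, no longer consumed by these defaults
# openshift_hosted_logging_elasticsearch_cluster_size: 3
# openshift_hosted_logging_elasticsearch_instance_ram: 8Gi

# After: the openshift_logging_* equivalents read above
openshift_logging_es_cluster_size: 3
openshift_logging_es_memory_limit: 8Gi
```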
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 45298e345..3040d15ca 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -105,3 +105,9 @@
- logging-elasticsearch
- logging-fluentd
- logging-mux
+
+## EventRouter
+- include_role:
+ name: openshift_logging_eventrouter
+ when:
+ not openshift_logging_install_eventrouter | default(false) | bool
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index a77df9986..2695ef030 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -134,6 +134,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -165,6 +166,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -268,4 +270,12 @@
openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}"
+
+## EventRouter
+- include_role:
+ name: openshift_logging_eventrouter
+ when:
+ openshift_logging_install_eventrouter | default(false) | bool
+
+
- include: update_master_config.yaml
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index f475024dd..0da9771c7 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -30,12 +30,13 @@
check_mode: no
become: no
-- include: "{{ role_path }}/tasks/install_logging.yaml"
- when: openshift_logging_install_logging | default(false) | bool
+- include: install_logging.yaml
+ when:
+ - openshift_logging_install_logging | default(false) | bool
-- include: "{{ role_path }}/tasks/delete_logging.yaml"
+- include: delete_logging.yaml
when:
- - not openshift_logging_install_logging | default(false) | bool
+ - openshift_logging_uninstall_logging | default(false) | bool
- name: Cleaning up local temp dir
local_action: file path="{{local_tmp.stdout}}" state=absent
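Installation and uninstallation are now driven by two independent flags rather than a single boolean, so an inventory toggling between the two states might look like this sketch:

```yaml
# Install (or re-run) aggregated logging:
openshift_logging_install_logging: true
openshift_logging_uninstall_logging: false

# To uninstall instead, flip both flags; when both are false,
# neither install_logging.yaml nor delete_logging.yaml is included:
# openshift_logging_install_logging: false
# openshift_logging_uninstall_logging: true
```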
diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md
new file mode 100644
index 000000000..da313d68b
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/README.md
@@ -0,0 +1,20 @@
+Event router
+------------
+
+A pod that forwards Kubernetes events to the EFK aggregated logging stack.
+
+- **eventrouter** is deployed to the logging project, with a service account and its own cluster role to read events
+- **eventrouter** watches Kubernetes events, marshals them to JSON, and writes them to its sink, currently limited to variously formatted STDOUT output
+- **fluentd** picks them up and inserts them into the Elasticsearch *.operations* index
+
+- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled.
+
+Configuration variables:
+
+- `openshift_logging_eventrouter_image_prefix`: The prefix for the eventrouter logging image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'.
+- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter, supported 'stdout' and 'glog'. Defaults to 'stdout'.
+- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. `{"node":"infra","region":"west"}`) to select the nodes where the pod will land.
+- `openshift_logging_eventrouter_cpu_limit`: The amount of CPU to allocate to eventrouter. Defaults to '100m'.
+- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'.
+- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'.
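Putting these variables together, a hypothetical inventory snippet that enables the eventrouter alongside logging could look like the following; the node label is an assumption, not a role default:

```yaml
openshift_logging_install_logging: true       # eventrouter installs only when both flags are true
openshift_logging_install_eventrouter: true
openshift_logging_eventrouter_sink: stdout    # 'glog' is the only other supported sink
openshift_logging_eventrouter_nodeselector: {"region": "infra"}  # hypothetical label
```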
diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml
new file mode 100644
index 000000000..34e33f75f
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default('latest') }}"
+openshift_logging_eventrouter_replicas: 1
+openshift_logging_eventrouter_sink: stdout
+openshift_logging_eventrouter_nodeselector: ""
+openshift_logging_eventrouter_cpu_limit: 100m
+openshift_logging_eventrouter_memory_limit: 128Mi
+openshift_logging_eventrouter_namespace: default
diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
new file mode 100644
index 000000000..91708e54b
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
@@ -0,0 +1,103 @@
+# This OpenShift template should match the Jinja2 template in
+# ../templates/eventrouter-template.j2 (except for the nodeSelector handling)
+kind: Template
+apiVersion: v1
+metadata:
+ name: eventrouter-template
+ annotations:
+ description: "A pod that forwards Kubernetes events to the EFK aggregated logging stack."
+ tags: "events,EFK,logging"
+objects:
+ - kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: aggregated-logging-eventrouter
+ - kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: event-reader
+ rules:
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+ - kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: logging-eventrouter
+ data:
+ config.json: |-
+ {
+ "sink": "${SINK}"
+ }
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: logging-eventrouter
+ labels:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ spec:
+ selector:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ replicas: ${REPLICAS}
+ template:
+ metadata:
+ labels:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ name: logging-eventrouter
+ spec:
+ serviceAccount: aggregated-logging-eventrouter
+ serviceAccountName: aggregated-logging-eventrouter
+ containers:
+ - name: kube-eventrouter
+ image: ${IMAGE}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: ${MEMORY}
+ cpu: ${CPU}
+ requests:
+ memory: ${MEMORY}
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/eventrouter
+ volumes:
+ - name: config-volume
+ configMap:
+ name: logging-eventrouter
+ - kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: event-reader-binding
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-eventrouter
+ namespace: ${NAMESPACE}
+ roleRef:
+ kind: ClusterRole
+ name: event-reader
+
+parameters:
+ - name: SINK
+ displayName: Sink
+ value: stdout
+ - name: REPLICAS
+ displayName: Replicas
+ value: "1"
+ - name: IMAGE
+ displayName: Image
+ value: "docker.io/openshift/origin-logging-eventrouter:latest"
+ - name: MEMORY
+ displayName: Memory
+ value: "128Mi"
+ - name: CPU
+ displayName: CPU
+ value: "100m"
+ - name: NAMESPACE
+ displayName: Namespace
+ value: default
diff --git a/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml
new file mode 100644
index 000000000..cf0abbde9
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml
@@ -0,0 +1,40 @@
+---
+# delete eventrouter
+- name: Delete EventRouter service account
+ oc_serviceaccount:
+ state: absent
+ name: "aggregated-logging-eventrouter"
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+
+- name: Delete event-reader cluster role
+ oc_clusterrole:
+ state: absent
+ name: event-reader
+
+- name: Unset privileged permissions for EventRouter
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ resource_kind: cluster-role
+ resource_name: event-reader
+ state: absent
+ user: "system:serviceaccount:{{ openshift_logging_eventrouter_namespace }}:aggregated-logging-eventrouter"
+
+- name: Delete EventRouter configmap
+ oc_configmap:
+ state: absent
+ name: logging-eventrouter
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+
+- name: Delete EventRouter DC
+ oc_obj:
+ state: absent
+ name: logging-eventrouter
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ kind: dc
+
+- name: Delete EventRouter Template
+ oc_obj:
+ state: absent
+ name: eventrouter-template
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ kind: template
diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
new file mode 100644
index 000000000..8df7435e2
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
@@ -0,0 +1,59 @@
+---
+# initial checks
+- assert:
+ msg: Invalid sink type "{{openshift_logging_eventrouter_sink}}", only one of "{{__eventrouter_sinks}}" allowed
+ that: openshift_logging_eventrouter_sink in __eventrouter_sinks
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# create EventRouter deployment config
+- name: Generate EventRouter template
+ template:
+ src: eventrouter-template.j2
+ dest: "{{ tempdir }}/templates/eventrouter-template.yaml"
+ vars:
+ node_selector: "{{ openshift_logging_eventrouter_nodeselector | default({}) }}"
+
+- name: Create EventRouter template
+ oc_obj:
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ kind: template
+ name: eventrouter-template
+ state: present
+ files:
+ - "{{ tempdir }}/templates/eventrouter-template.yaml"
+
+- name: Process EventRouter template
+ oc_process:
+ state: present
+ template_name: eventrouter-template
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ params:
+ IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}"
+ REPLICAS: "{{ openshift_logging_eventrouter_replicas }}"
+ CPU: "{{ openshift_logging_eventrouter_cpu_limit }}"
+ MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}"
+ NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}"
+ SINK: "{{ openshift_logging_eventrouter_sink }}"
+
+## Placeholder for migration when necessary ##
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml
new file mode 100644
index 000000000..58e5a559f
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- include: "{{ role_path }}/tasks/install_eventrouter.yaml"
+ when: openshift_logging_install_eventrouter | default(false) | bool
+
+- include: "{{ role_path }}/tasks/delete_eventrouter.yaml"
+ when: not openshift_logging_install_eventrouter | default(false) | bool
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
new file mode 100644
index 000000000..9ff4c7e80
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -0,0 +1,109 @@
+# This Jinja2 template should always match the OpenShift template in
+# ../files/eventrouter-template.yaml (except for the nodeSelector handling)
+kind: Template
+apiVersion: v1
+metadata:
+ name: eventrouter-template
+ annotations:
+ description: "A pod that forwards Kubernetes events to the EFK aggregated logging stack."
+ tags: "events,EFK,logging"
+objects:
+ - kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: aggregated-logging-eventrouter
+ - kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: event-reader
+ rules:
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+ - kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: logging-eventrouter
+ data:
+ config.json: |-
+ {
+ "sink": "${SINK}"
+ }
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: logging-eventrouter
+ labels:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ spec:
+ selector:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ replicas: ${REPLICAS}
+ template:
+ metadata:
+ labels:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ name: logging-eventrouter
+ spec:
+ serviceAccount: aggregated-logging-eventrouter
+ serviceAccountName: aggregated-logging-eventrouter
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.items() %}
+ {{ key }}: "{{ value }}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: kube-eventrouter
+ image: ${IMAGE}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: ${MEMORY}
+ cpu: ${CPU}
+ requests:
+ memory: ${MEMORY}
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/eventrouter
+ volumes:
+ - name: config-volume
+ configMap:
+ name: logging-eventrouter
+ - kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: event-reader-binding
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-eventrouter
+ namespace: ${NAMESPACE}
+ roleRef:
+ kind: ClusterRole
+ name: event-reader
+
+parameters:
+ - name: SINK
+ displayName: Sink
+ value: stdout
+ - name: REPLICAS
+ displayName: Replicas
+ value: "1"
+ - name: IMAGE
+ displayName: Image
+ value: "docker.io/openshift/origin-logging-eventrouter:latest"
+ - name: MEMORY
+ displayName: Memory
+ value: "128Mi"
+ - name: CPU
+ displayName: CPU
+ value: "100m"
+ - name: NAMESPACE
+ displayName: Namespace
+ value: default
diff --git a/roles/openshift_logging_eventrouter/vars/main.yaml b/roles/openshift_logging_eventrouter/vars/main.yaml
new file mode 100644
index 000000000..bdf561fe3
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/vars/main.yaml
@@ -0,0 +1,2 @@
+---
+__eventrouter_sinks: ["glog", "stdout"]
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 30d3d854a..82326bdd1 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -50,8 +50,6 @@ openshift_logging_fluentd_aggregating_key_path: none
openshift_logging_fluentd_aggregating_passphrase: none
### Deprecating in 3.6
-openshift_logging_fluentd_es_copy: false
-
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#fluentd_config_contents:
#fluentd_throttle_contents:
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 74b4d7db4..37960afd1 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -1,5 +1,8 @@
---
- fail:
+ msg: The ES_COPY feature is no longer supported. Please remove the variable from your inventory.
+ when: openshift_logging_fluentd_es_copy is defined
+- fail:
msg: Only one Fluentd nodeselector key pair should be provided
when: openshift_logging_fluentd_nodeselector.keys() | count > 1
diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
index 46de94d60..6e07b403a 100644
--- a/roles/openshift_logging_fluentd/templates/fluent.conf.j2
+++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
@@ -49,7 +49,9 @@
@include configs.d/openshift/filter-viaq-data-model.conf
@include configs.d/openshift/filter-post-*.conf
##
+</label>
+<label @OUTPUT>
## matches
@include configs.d/openshift/output-pre-*.conf
@include configs.d/openshift/output-operations.conf
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index a4afb6618..b5f27b60d 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -94,8 +94,6 @@ spec:
value: "{{ openshift_logging_fluentd_ops_client_key }}"
- name: "OPS_CA"
value: "{{ openshift_logging_fluentd_ops_ca }}"
- - name: "ES_COPY"
- value: "false"
- name: "JOURNAL_SOURCE"
value: "{{ openshift_logging_fluentd_journal_source | default('') }}"
- name: "JOURNAL_READ_FROM_HEAD"
@@ -120,6 +118,56 @@ spec:
- name: "MUX_CLIENT_MODE"
value: "{{ openshift_logging_mux_client_mode }}"
{% endif %}
+{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %}
+ - name: "TRANSFORM_EVENTS"
+ value: "true"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %}
+ - name: USE_REMOTE_SYSLOG
+ value: "true"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_host is defined %}
+ - name: REMOTE_SYSLOG_HOST
+ value: "{{ openshift_logging_fluentd_remote_syslog_host }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_port is defined %}
+ - name: REMOTE_SYSLOG_PORT
+ value: "{{ openshift_logging_fluentd_remote_syslog_port }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_severity is defined %}
+ - name: REMOTE_SYSLOG_SEVERITY
+ value: "{{ openshift_logging_fluentd_remote_syslog_severity }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_facility is defined %}
+ - name: REMOTE_SYSLOG_FACILITY
+ value: "{{ openshift_logging_fluentd_remote_syslog_facility }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %}
+ - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX
+ value: "{{ openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %}
+ - name: REMOTE_SYSLOG_TAG_KEY
+ value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_use_record is defined %}
+ - name: REMOTE_SYSLOG_USE_RECORD
+ value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %}
+ - name: REMOTE_SYSLOG_PAYLOAD_KEY
+ value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}"
+{% endif %}
+
volumes:
- name: runlogjournal
hostPath:
diff --git a/roles/openshift_logging_mux/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf
index aeaa705ee..bf61c9811 100644
--- a/roles/openshift_logging_mux/files/fluent.conf
+++ b/roles/openshift_logging_mux/files/fluent.conf
@@ -25,7 +25,9 @@
@include configs.d/openshift/filter-viaq-data-model.conf
@include configs.d/openshift/filter-post-*.conf
##
+</label>
+<label @OUTPUT>
## matches
@include configs.d/openshift/output-pre-*.conf
@include configs.d/openshift/output-operations.conf
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index ff18d3270..4cc48139f 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -119,6 +119,52 @@ spec:
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}"
+
+{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %}
+ - name: USE_REMOTE_SYSLOG
+ value: "true"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_host is defined %}
+ - name: REMOTE_SYSLOG_HOST
+ value: "{{ openshift_logging_mux_remote_syslog_host }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_port is defined %}
+ - name: REMOTE_SYSLOG_PORT
+ value: "{{ openshift_logging_mux_remote_syslog_port }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_severity is defined %}
+ - name: REMOTE_SYSLOG_SEVERITY
+ value: "{{ openshift_logging_mux_remote_syslog_severity }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_facility is defined %}
+ - name: REMOTE_SYSLOG_FACILITY
+ value: "{{ openshift_logging_mux_remote_syslog_facility }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %}
+ - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX
+ value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_tag_key is defined %}
+ - name: REMOTE_SYSLOG_TAG_KEY
+ value: "{{ openshift_logging_mux_remote_syslog_tag_key }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_use_record is defined %}
+ - name: REMOTE_SYSLOG_USE_RECORD
+ value: "{{ openshift_logging_mux_remote_syslog_use_record }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_payload_key is defined %}
+ - name: REMOTE_SYSLOG_PAYLOAD_KEY
+ value: "{{ openshift_logging_mux_remote_syslog_payload_key }}"
+{% endif %}
+
volumes:
- name: config
configMap:
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 7ccc2fc3b..f142f89f0 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -3,6 +3,9 @@ manage_iq_tasks:
- resource_kind: role
resource_name: admin
user: management-admin
+- resource_kind: role
+ resource_name: admin
+ user: system:serviceaccount:management-infra:management-admin
- resource_kind: cluster-role
resource_name: management-infra-admin
user: system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 86fa57b50..2dcc56e3f 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -1,4 +1,4 @@
-OpenShift/Atomic Enterprise Master
+OpenShift Master
==================================
Master service installation
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 71bb09a76..73e935d3f 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -20,11 +20,11 @@ r_openshift_master_os_firewall_allow:
port: 4001/tcp
cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-oreg_url: ''
-oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
-
+l_bind_docker_reg_auth: False
# NOTE
# r_openshift_master_*_default may be defined external to this role.
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index b0237141b..a657668a9 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -14,19 +14,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_master_facts
-- role: openshift_hosted_facts
-- role: openshift_master_certificates
-- role: openshift_etcd_client_certificates
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config | default([]) | length != 0
-- role: openshift_clock
-- role: openshift_cloud_provider
-- role: openshift_builddefaults
-- role: openshift_buildoverrides
-- role: nickhammond.logrotate
-- role: contiv
- contiv_role: netmaster
- when: openshift_use_contiv | default(False) | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 121261e94..82b4b420c 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -180,6 +180,28 @@
- name: Install the systemd units
include: systemd_units.yml
+- name: Check for journald.conf
+ stat: path=/etc/systemd/journald.conf
+ register: journald_conf_file
+
+- name: Update journald setup
+ replace:
+ dest: /etc/systemd/journald.conf
+ regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+ replace: ' {{ item.var }}={{ item.val }}'
+ backup: yes
+ with_items: "{{ journald_vars_to_replace | default([]) }}"
+ when: journald_conf_file.stat.exists
+ register: journald_update
+
+# Restart journald immediately, otherwise it gets in the way of
+# further steps in Ansible
+- name: Restart journald
+ systemd:
+ name: systemd-journald
+ state: restarted
+ when: journald_update | changed
+
- name: Install Master system container
include: system_container.yml
when:
@@ -200,7 +222,7 @@
- restart master api
- set_fact:
- translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"
+ translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
@@ -229,22 +251,6 @@
- restart master controllers
when: openshift_master_bootstrap_enabled | default(False)
-- name: Check for credentials file for registry auth
- stat:
- path: "{{oreg_auth_credentials_path }}"
- when:
- - oreg_auth_user is defined
- register: master_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart master api
- - restart master controllers
-
- include: set_loopback_context.yml
when:
- openshift.common.version_gte_3_2_or_1_2
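The "Update journald setup" task above iterates over `journald_vars_to_replace`, which defaults to an empty list here; a hypothetical definition (these keys and values are assumptions, not defaults of this role) could look like:

```yaml
# Each item rewrites one key in /etc/systemd/journald.conf via the
# replace module; RateLimitInterval/RateLimitBurst are standard journald keys.
journald_vars_to_replace:
  - var: RateLimitInterval
    val: 1s
  - var: RateLimitBurst
    val: 10000
```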
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
new file mode 100644
index 000000000..2644f235e
--- /dev/null
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -0,0 +1,35 @@
+---
+# We need to set up some variables, as this play might be called directly
+# from outside the role.
+- set_fact:
+ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+ when: oreg_auth_credentials_path is not defined
+
+- set_fact:
+ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+ when: oreg_host is not defined
+
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: master_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: master_oreg_auth_credentials_create
+ notify:
+ - restart master api
+ - restart master controllers
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool
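registry_auth.yml only acts when registry credentials are supplied, so a hedged inventory sketch that exercises it might look like this; the registry host and credentials are placeholders:

```yaml
oreg_url: registry.example.com/openshift3/ose-${component}:${version}  # placeholder registry
oreg_auth_user: myuser                # placeholder credentials
oreg_auth_password: mypassword
oreg_auth_credentials_replace: true   # force a fresh `docker login` even if .docker already exists
```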
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 7a918c57e..8de62c59a 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -17,6 +17,8 @@
r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
when: r_openshift_master_data_dir is not defined
+- include: registry_auth.yml
+
- name: Remove the legacy master service if it exists
include: clean_systemd_units.yml
diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml
new file mode 100644
index 000000000..1ab105808
--- /dev/null
+++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml
@@ -0,0 +1,8 @@
+---
+- yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: 'etcdClientInfo.urls'
+ value: "{{ openshift.master.etcd_urls }}"
+ notify:
+ - restart master api
+ - restart master controllers
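For reference, the yedit call above rewrites the `etcdClientInfo.urls` key in master-config.yaml; assuming the usual 3.x layout, the resulting stanza might look like this sketch (certificate file names and URLs are illustrative):

```yaml
etcdClientInfo:
  ca: master.etcd-ca.crt
  certFile: master.etcd-client.crt
  keyFile: master.etcd-client.key
  urls:
    - https://etcd1.example.com:2379
    - https://etcd2.example.com:2379
```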
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index f06448d71..5d4a99c97 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,17 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+ --name {{ openshift.common.service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+ -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
+ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
+ --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index b7f36491b..f93f3b565 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,17 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+ --name {{ openshift.common.service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
+ --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index cf39b73f6..0c681c764 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,3 +20,22 @@ openshift_master_valid_grant_methods:
- deny
openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, a maximum of 8GB of disk space, 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
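These settings are presumably consumed by a task that loops over journald_vars_to_replace and rewrites /etc/systemd/journald.conf; a minimal sketch of such a consumer, with the task name, destination path, and handler name as assumptions rather than facts from this patch:

- name: Update journald configuration  # hypothetical consumer of journald_vars_to_replace
  lineinfile:
    dest: /etc/systemd/journald.conf
    regexp: '^(\s*#\s*)?{{ item.var }}='
    line: '{{ item.var }}={{ item.val }}'
  with_items: "{{ journald_vars_to_replace }}"
  notify: restart journald  # assumed handler name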
diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml
index 018186e86..300b2cbff 100644
--- a/roles/openshift_master_certificates/meta/main.yml
+++ b/roles/openshift_master_certificates/meta/main.yml
@@ -12,6 +12,4 @@ galaxy_info:
categories:
- cloud
- system
-dependencies:
-- role: openshift_master_facts
-- role: openshift_ca
+dependencies: []
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
index a80313505..d0dcdae4b 100644
--- a/roles/openshift_master_facts/defaults/main.yml
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -1,5 +1,5 @@
---
-openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+openshift_master_default_subdomain: "router.default.svc.cluster.local"
openshift_master_admission_plugin_config:
openshift.io/ImagePolicy:
configuration:
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e767772ce..f7f3ac2b1 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -6,10 +6,6 @@ Custom filters for use in openshift-master
import copy
import sys
-# pylint import-error disabled because pylint cannot find the package
-# when installed in a virtualenv
-from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
-
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
@@ -82,23 +78,8 @@ class IdentityProviderBase(object):
self._allow_additional = True
@staticmethod
- def validate_idp_list(idp_list, openshift_version, deployment_type):
+ def validate_idp_list(idp_list):
''' validates a list of idps '''
- login_providers = [x.name for x in idp_list if x.login]
-
- multiple_logins_unsupported = False
- if len(login_providers) > 1:
- if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:
- if LooseVersion(openshift_version) < LooseVersion('3.2'):
- multiple_logins_unsupported = True
- if deployment_type in ['origin']:
- if LooseVersion(openshift_version) < LooseVersion('1.2'):
- multiple_logins_unsupported = True
- if multiple_logins_unsupported:
- raise errors.AnsibleFilterError("|failed multiple providers are "
- "not allowed for login. login "
- "providers: {0}".format(', '.join(login_providers)))
-
names = [x.name for x in idp_list]
if len(set(names)) != len(names):
raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
@@ -380,11 +361,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
if 'extra_authorize_parameters' in self._idp:
self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
- if 'extraAuthorizeParameters' in self._idp:
- if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
- val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
- self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
-
def validate(self):
''' validate this idp instance '''
IdentityProviderOauthBase.validate(self)
@@ -476,7 +452,7 @@ class FilterModule(object):
''' Custom ansible filters for use by the openshift_master role'''
@staticmethod
- def translate_idps(idps, api_version, openshift_version, deployment_type):
+ def translate_idps(idps, api_version):
''' Translates a list of dictionaries into a valid identityProviders config '''
idp_list = []
@@ -492,7 +468,7 @@ class FilterModule(object):
idp_inst.set_provider_items()
idp_list.append(idp_inst)
- IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
+ IdentityProviderBase.validate_idp_list(idp_list)
return u(yaml.dump([idp.to_dict() for idp in idp_list],
allow_unicode=True,
default_flow_style=False,
diff --git a/roles/openshift_master_facts/lookup_plugins/oo_option.py b/roles/openshift_master_facts/lookup_plugins/oo_option.py
deleted file mode 120000
index 5ae43f8dd..000000000
--- a/roles/openshift_master_facts/lookup_plugins/oo_option.py
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins/oo_option.py \ No newline at end of file
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index c45f255af..f27eb629d 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -101,7 +101,7 @@ class LookupModule(LookupBase):
{'name': 'MatchInterPodAffinity'}
])
- if short_version in ['3.5', '3.6', '3.7']:
+ if short_version in ['3.5', '3.6']:
predicates.extend([
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
@@ -114,6 +114,21 @@ class LookupModule(LookupBase):
{'name': 'CheckNodeDiskPressure'},
])
+ if short_version in ['3.7']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+ ])
+
if regions_enabled:
region_predicate = {
'name': 'Region',
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index fa228af2a..a95570d38 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -1,5 +1,4 @@
---
-
# Ensure the default sub-domain is set:
- name: Migrate legacy osm_default_subdomain fact
set_fact:
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 4a28fb8f8..38a918803 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -57,6 +57,20 @@ DEFAULT_PREDICATES_1_5 = [
DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5
+DEFAULT_PREDICATES_3_7 = [
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+]
+
REGION_PREDICATE = {
'name': 'Region',
'argument': {
@@ -79,6 +93,8 @@ TEST_VARS = [
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
('3.6', 'origin', DEFAULT_PREDICATES_3_6),
('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
+ ('3.7', 'origin', DEFAULT_PREDICATES_3_7),
+ ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7),
]
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 1f10de4a2..ed698daca 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -39,6 +39,8 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_metrics_hawkular_replicas:` The number of replicas for Hawkular metrics.
+- `openshift_metrics_hawkular_route_annotations`: Dictionary with annotations for the Hawkular route.
+
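  For example, to attach an annotation to the route (the annotation key here is illustrative, not prescribed by this role):

      openshift_metrics_hawkular_route_annotations:
        kubernetes.io/tls-acme: "true"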
- `openshift_metrics_cassandra_replicas`: The number of Cassandra nodes to deploy for the
initial cluster.
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index d9a17ae7f..084b734ee 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -1,6 +1,7 @@
---
openshift_metrics_start_cluster: True
-openshift_metrics_install_metrics: True
+openshift_metrics_install_metrics: False
+openshift_metrics_uninstall_metrics: False
openshift_metrics_startup_timeout: 500
openshift_metrics_hawkular_replicas: 1
@@ -12,11 +13,12 @@ openshift_metrics_hawkular_cert: ""
openshift_metrics_hawkular_key: ""
openshift_metrics_hawkular_ca: ""
openshift_metrics_hawkular_nodeselector: ""
+openshift_metrics_hawkular_route_annotations: {}
openshift_metrics_cassandra_replicas: 1
-openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
-openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
-openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}"
+openshift_metrics_cassandra_storage_type: "{{ openshift_metrics_storage_kind | default('emptydir') }}"
+openshift_metrics_cassandra_pvc_size: "{{ openshift_metrics_storage_volume_size | default('10Gi') }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_metrics_storage_labels | default('') }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -53,9 +55,12 @@ openshift_metrics_master_url: https://kubernetes.default.svc
openshift_metrics_node_id: nodename
openshift_metrics_project: openshift-infra
-openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}"
-openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
+openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}"
+openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
openshift_metrics_hawkular_user_write_access: False
openshift_metrics_heapster_allowed_users: system:master-proxy
+
+openshift_metrics_cassandra_enable_prometheus_endpoint: True
+openshift_metrics_hawkular_enable_prometheus_endpoint: True
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index 6b37f85ab..b63f5ca8c 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -40,6 +40,7 @@
dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-route.yaml"
vars:
name: hawkular-metrics
+ annotations: "{{ openshift_metrics_hawkular_route_annotations }}"
labels:
metrics-infra: hawkular-metrics
host: "{{ openshift_metrics_hawkular_hostname }}"
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index eaabdd20f..c92458c50 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -43,7 +43,13 @@
check_mode: no
tags: metrics_init
-- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
+- include: install_metrics.yaml
+ when:
+ - openshift_metrics_install_metrics | bool
+
+- include: uninstall_metrics.yaml
+ when:
+ - openshift_metrics_uninstall_metrics | bool
- include: uninstall_hosa.yaml
when: not openshift_metrics_install_hawkular_agent | bool
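Note that with the changed defaults above, metrics installation is now opt-in: an inventory or CLI override such as -e openshift_metrics_install_metrics=True is required to install, and -e openshift_metrics_uninstall_metrics=True triggers the uninstall path instead of the old single-variable ternary.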
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index 2e2013d40..d6756f9b9 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -10,7 +10,7 @@
is invalid, must be one of: emptydir, pv, dynamic
when:
- openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types
- - "not {{ openshift_metrics_heapster_standalone | bool }}"
+ - not (openshift_metrics_heapster_standalone | bool)
- name: list existing secrets
command: >
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index fc82f49b1..6f341bcfb 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -56,6 +56,8 @@ spec:
value: "/cassandra_data"
- name: JVM_OPTS
value: "-Dcassandra.commitlog.ignorereplayerrors=true"
+ - name: ENABLE_PROMETHEUS_ENDPOINT
+ value: "{{ openshift_metrics_cassandra_enable_prometheus_endpoint }}"
- name: TRUSTSTORE_NODES_AUTHORITIES
value: "/hawkular-cassandra-certs/tls.peer.truststore.crt"
- name: TRUSTSTORE_CLIENT_AUTHORITIES
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 9a9363075..59f7fb44a 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -55,6 +55,7 @@ spec:
- "-Dcom.datastax.driver.FORCE_NIO=true"
- "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}"
- "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}"
+ - "-Dhawkular.metrics.jmx-reporting-enabled"
env:
- name: POD_NAMESPACE
valueFrom:
@@ -66,6 +67,8 @@ spec:
value: "{{ 17 | oo_random_word }}"
- name: TRUSTSTORE_AUTHORITIES
value: "/hawkular-metrics-certs/tls.truststore.crt"
+ - name: ENABLE_PROMETHEUS_ENDPOINT
+ value: "{{ openshift_metrics_hawkular_enable_prometheus_endpoint }}"
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2
index 423ab54a3..253d6ecf5 100644
--- a/roles/openshift_metrics/templates/route.j2
+++ b/roles/openshift_metrics/templates/route.j2
@@ -2,6 +2,9 @@ apiVersion: v1
kind: Route
metadata:
name: {{ name }}
+{% if annotations is defined %}
+ annotations: {{ annotations | to_yaml }}
+{% endif %}
{% if labels is defined and labels %}
labels:
{% for k, v in labels.iteritems() %}
diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml
index 678c4104c..8704ddfa0 100644
--- a/roles/openshift_metrics/vars/default_images.yml
+++ b/roles/openshift_metrics/vars/default_images.yml
@@ -1,3 +1,3 @@
---
-__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}"
+__openshift_metrics_image_prefix: "docker.io/openshift/origin-"
+__openshift_metrics_image_version: "latest"
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index f0bdac7d2..68cdf06fe 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
-__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('v3.6') }}"
+__openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_metrics_image_version: "v3.6"
diff --git a/roles/openshift_named_certificates/defaults/main.yml b/roles/openshift_named_certificates/defaults/main.yml
new file mode 100644
index 000000000..a32e385ec
--- /dev/null
+++ b/roles/openshift_named_certificates/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
+openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
+openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_named_certificates/vars/main.yml b/roles/openshift_named_certificates/vars/main.yml
index 368e9bdac..7f891441d 100644
--- a/roles/openshift_named_certificates/vars/main.yml
+++ b/roles/openshift_named_certificates/vars/main.yml
@@ -1,10 +1,4 @@
---
-openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
-openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
-openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
-openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
-openshift_version: "{{ openshift_pkg_version | default('') }}"
-
overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}"
named_certs_dir: "{{ openshift.common.config_base }}/master/named_certificates/"
internal_hostnames: "{{ openshift.common.internal_hostnames }}"
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 32670b18e..67f697924 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -1,4 +1,4 @@
-OpenShift/Atomic Enterprise Node
+OpenShift Node
================================
Node service installation
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index f1e64f3aa..1214c08e5 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -31,12 +31,9 @@ openshift_node_ami_prep_packages:
- python-dbus
- PyYAML
- yum-utils
-- python2-boto
-- python2-boto3
- cloud-utils-growpart
# gluster
- glusterfs-fuse
-- heketi-client
# nfs
- nfs-utils
- flannel
@@ -60,7 +57,7 @@ openshift_deployment_type: origin
openshift_node_bootstrap: False
r_openshift_node_os_firewall_deny: []
-r_openshift_node_os_firewall_allow:
+default_r_openshift_node_os_firewall_allow:
- service: Kubernetes kubelet
port: 10250/tcp
- service: http
@@ -79,12 +76,14 @@ r_openshift_node_os_firewall_allow:
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
cond: "{{ openshift_node_port_range is defined }}"
+# Allow multiple port ranges to be added to the role
+r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}"
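For example, an inventory can extend the allow list through the union above (service name and port are hypothetical):

    openshift_node_open_ports:
    - service: node exporter
      port: 9100/tcp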
-oreg_url: ''
-oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
-
+l_bind_docker_reg_auth: False
# NOTE
# r_openshift_node_*_default may be defined external to this role.
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 855b0a8d8..25a6fc721 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -29,8 +29,5 @@
- not (node_service_status_changed | default(false) | bool)
- not openshift_node_bootstrap
-- name: reload sysctl.conf
- command: /sbin/sysctl -p
-
- name: reload systemd units
command: systemctl daemon-reload
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index ce5ecb9d0..5bc7b9869 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -17,7 +17,5 @@ dependencies:
- role: lib_os_firewall
- role: openshift_clock
- role: openshift_docker
-- role: openshift_node_certificates
- when: not openshift_node_bootstrap
- role: openshift_cloud_provider
- role: openshift_node_dnsmasq
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 7af3f54b5..2759188f3 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,18 +2,6 @@
- name: Install the systemd units
include: systemd_units.yml
-- name: Check for tuned package
- command: rpm -q tuned
- args:
- warn: no
- register: tuned_installed
- changed_when: false
- failed_when: false
-
-- name: Set atomic-guest tuned profile
- command: "tuned-adm profile atomic-guest"
- when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
-
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
@@ -107,5 +95,9 @@
msg: Node failed to start; please inspect the logs and try again
when: node_start_result | failed
+- name: Setup tuned
+ include: tuned.yml
+ static: yes
+
- set_fact:
node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 02b8ee67c..265bf2c46 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,11 +1,9 @@
---
-# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
-# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
- when: not openshift.common.is_containerized | bool
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- name: Install sdn-ovs package
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 22ff6dfd2..e82fb42b8 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -2,7 +2,8 @@
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when:
- - (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
+ - (not ansible_selinux or ansible_selinux.status != 'enabled')
+ - deployment_type == 'openshift-enterprise'
- not openshift_use_crio | default(false)
- name: setup firewall
@@ -59,38 +60,22 @@
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
-#
-# Use lineinfile w/ a handler for this task until
-# https://github.com/ansible/ansible/pull/24277 is included in an
-# ansible release and we can use the sysctl module.
-- name: Persist net.ipv4.ip_forward sysctl entry
- lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
- notify:
- - reload sysctl.conf
+- sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+ reload: yes
- name: include bootstrap node config
include: bootstrap.yml
when: openshift_node_bootstrap
+- include: registry_auth.yml
+
- name: include standard node config
include: config.yml
when: not openshift_node_bootstrap
-- name: Check for credentials file for registry auth
- stat:
- path: "{{oreg_auth_credentials_path }}"
- when:
- - oreg_auth_user is defined
- register: node_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart node
-
- name: Configure AWS Cloud Provider Settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index b2dceedbe..20d7a9539 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -9,4 +9,8 @@
oc_atomic_container:
name: "{{ openshift.common.service_type }}-node"
image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ values:
+ - "DNS_DOMAIN={{ openshift.common.dns_domain }}"
+ - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service"
+ - "MASTER_SERVICE={{ openshift.common.service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
new file mode 100644
index 000000000..de396fb4b
--- /dev/null
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -0,0 +1,24 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
+ notify:
+ - restart node
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 8734e7443..fa7238849 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
ExecStop=
SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 57094f28e..310d8b29d 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -21,7 +21,23 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
+ -e HOST=/rootfs -e HOST_ETC=/host-etc \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
+ -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+ -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
+ -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
+ -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
+ -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ {% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \
+ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml
index 93216c1d2..0440bf11a 100644
--- a/roles/openshift_node_certificates/meta/main.yml
+++ b/roles/openshift_node_certificates/meta/main.yml
@@ -12,5 +12,4 @@ galaxy_info:
categories:
- cloud
- system
-dependencies:
-- role: openshift_facts
+dependencies: []
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 61d2a5b51..df02bcf0e 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -114,6 +114,8 @@ EOF
echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
+ elif ! grep -qw search ${NEW_RESOLV_CONF}; then
+ echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
fi
cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
fi
diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml
index b4a0c3583..9f98126a0 100644
--- a/roles/openshift_node_dnsmasq/handlers/main.yml
+++ b/roles/openshift_node_dnsmasq/handlers/main.yml
@@ -3,6 +3,7 @@
systemd:
name: NetworkManager
state: restarted
+ enabled: True
- name: restart dnsmasq
systemd:
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
index d5fda7bd0..8a7da66c2 100644
--- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -1,2 +1,11 @@
---
- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation."
+ when: not openshift_node_bootstrap | bool
+
+- name: Install NetworkManager during node_bootstrap provisioning
+ package:
+ name: NetworkManager
+ state: present
+ notify: restart NetworkManager
+
+- include: ./network-manager.yml
diff --git a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py
new file mode 100644
index 000000000..69069f2dc
--- /dev/null
+++ b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py
@@ -0,0 +1,32 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift-node
+'''
+from ansible import errors
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by openshift_node_facts role'''
+
+ @staticmethod
+ def node_get_dns_ip(openshift_dns_ip, hostvars):
+ ''' Navigates the complicated logic of when to set dnsIP
+
+ In all situations, if openshift_dns_ip is set, use that value.
+ For 1.0/3.0 installs we used openshift_master_cluster_vip or openshift_node_first_master_ip, else None.
+ For 1.1/3.1 installs we used openshift_master_cluster_vip, else None (the product falls back to the kube svc ip).
+ For 1.2/3.2+ installs we set it to the node's default interface ip.
+ '''
+
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+
+ # We always use what they've specified if they've specified a value
+ if openshift_dns_ip is not None:
+ return openshift_dns_ip
+ return hostvars['ansible_default_ipv4']['address']
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {'node_get_dns_ip': self.node_get_dns_ip}
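The filter is consumed from the openshift_node_facts tasks as dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname]) }}", as shown in the tasks change below.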
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index c268c945e..0d5fa664c 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -1,10 +1,4 @@
---
-- set_fact:
- openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
- when:
- - openshift_node_debug_level is not defined
- - lookup('oo_option', 'openshift_node_debug_level') != ""
-
- name: Set node facts
openshift_facts:
role: "{{ item.role }}"
@@ -20,7 +14,7 @@
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+ labels: "{{ openshift_node_labels | default(None) }}"
registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
@@ -30,5 +24,5 @@
ovs_image: "{{ osn_ovs_image | default(None) }}"
proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
+ dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}"
env_vars: "{{ openshift_node_env_vars | default(None) }}"
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 5ad994df9..c7c0ff34a 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -1,4 +1,4 @@
-OpenShift/Atomic Enterprise Node upgrade
+OpenShift Node upgrade
=========
Role responsible for a single node upgrade.
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index 3d8704308..6507b015d 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -4,3 +4,9 @@ os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
+oreg_auth_credentials_replace: False
+l_bind_docker_reg_auth: False
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index e34319186..6bcf3072d 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -10,6 +10,8 @@
# tasks file for openshift_node_upgrade
+- include: registry_auth.yml
+
- name: Stop node and openvswitch services
service:
name: "{{ item }}"
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
new file mode 100644
index 000000000..de396fb4b
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml
@@ -0,0 +1,24 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
+ notify:
+ - restart node
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 4c47f8c0d..aae35719c 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
ExecStop=
SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 451412ab0..864e4b5d6 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
+ -e HOST=/rootfs -e HOST_ETC=/host-etc \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
+ -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+ -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
+ -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
+ -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
+ -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 8d3d010e4..19e9a56b7 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,5 +9,4 @@ galaxy_info:
- name: EL
versions:
- 7
-dependencies:
-- role: openshift_hosted_facts
+dependencies: {}
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 18d6a1645..5aa8aecec 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -11,7 +11,7 @@ openshift_prometheus_node_selector: {"region":"infra"}
openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
-openshift_prometheus_image_alertbuffer: "ilackarms/message-buffer"
+openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 93bdda3e8..a9bce2fb1 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -107,7 +107,10 @@
- name: annotate prometheus service
command: >
{{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
- service prometheus 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls'
+ service prometheus
+ prometheus.io/scrape='true'
+ prometheus.io/scheme=https
+ service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls
- name: annotate alerts service
command: >
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index abd1997dd..ce3b51454 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -1,4 +1,4 @@
-OpenShift Repos
+OpenShift Repos
================
Configures repositories for an OpenShift installation
@@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|---------------|------------------------------------|
-| openshift_deployment_type | None | Possible values enterprise, origin |
-| openshift_additional_repos | {} | TODO |
+| Name | Default value | |
+|-------------------------------|---------------|----------------------------------------------|
+| openshift_deployment_type | None | Possible values openshift-enterprise, origin |
+| openshift_additional_repos | {} | TODO |
Dependencies
------------
diff --git a/roles/openshift_repos/tasks/centos_repos.yml b/roles/openshift_repos/tasks/centos_repos.yml
new file mode 100644
index 000000000..7dc15af2a
--- /dev/null
+++ b/roles/openshift_repos/tasks/centos_repos.yml
@@ -0,0 +1,25 @@
+---
+# Note: OpenShift repositories under CentOS may be shipped through the
+# "centos-release-openshift-origin" package which configures the repository.
+# This task matches the file names provided by the package so that the repos
+# are not configured twice in different files and the task remains idempotent.
+
+- name: Configure origin gpg keys
+ copy:
+ src: "origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS"
+ dest: "/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS"
+ notify: refresh cache
+
+# openshift_release is formatted to a standard string in openshift_version role.
+# openshift_release is expected to be in the format 'x.y.z...' here.
+# We drop the '.' characters and try to match the repo template
+# corresponding to our openshift_release.
+- name: Configure correct origin release repository
+ template:
+ src: "{{ item }}"
+ dest: "/etc/yum.repos.d/{{ (item | basename | splitext)[0] }}"
+ with_first_found:
+ - "CentOS-OpenShift-Origin{{ (openshift_release | default('')).split('.') | join('') }}.repo.j2"
+ - "CentOS-OpenShift-Origin{{ ((openshift_release | default('')).split('.') | join(''))[0:2] }}.repo.j2"
+ - "CentOS-OpenShift-Origin.repo.j2"
+ notify: refresh cache
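For example, with openshift_release '3.6.0' the candidates are CentOS-OpenShift-Origin360.repo.j2, then CentOS-OpenShift-Origin36.repo.j2, and finally the generic CentOS-OpenShift-Origin.repo.j2; the first template that exists in the role is rendered.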
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index f972c0fd9..d41245093 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -30,30 +30,13 @@
- when: r_openshift_repos_has_run is not defined
block:
- # Note: OpenShift repositories under CentOS may be shipped through the
- # "centos-release-openshift-origin" package which configures the repository.
- # This task matches the file names provided by the package so that they are
- # not installed twice in different files and remains idempotent.
- - name: Configure origin repositories and gpg keys if needed
- copy:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- with_items:
- - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
- dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
- - src: origin/repos/openshift-ansible-centos-paas-sig.repo
- dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
- notify: refresh cache
+ - include: centos_repos.yml
when:
- ansible_os_family == "RedHat"
- ansible_distribution != "Fedora"
- openshift_deployment_type == 'origin'
- openshift_enable_origin_repo | default(true) | bool
- - name: Enable centos-openshift-origin-testing repository
- command: yum-config-manager --enable centos-openshift-origin-testing
- when: openshift_repos_enable_testing | bool
-
- name: Ensure clean repo cache in the event repos have been changed manually
debug:
msg: "First run of openshift_repos"
diff --git a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2
index 09364c26f..b0c036e7c 100644
--- a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2
@@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
[centos-openshift-origin-testing]
name=CentOS OpenShift Origin Testing
baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin/
-enabled=0
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS
diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2
new file mode 100644
index 000000000..97e855d58
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2
@@ -0,0 +1,27 @@
+[centos-openshift-origin14]
+name=CentOS OpenShift Origin
+baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin14/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin14-testing]
+name=CentOS OpenShift Origin Testing
+baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin14/
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
+gpgcheck=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin14-debuginfo]
+name=CentOS OpenShift Origin DebugInfo
+baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin14-source]
+name=CentOS OpenShift Origin Source
+baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin14/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2
new file mode 100644
index 000000000..5e756e680
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2
@@ -0,0 +1,27 @@
+[centos-openshift-origin15]
+name=CentOS OpenShift Origin
+baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin15/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin15-testing]
+name=CentOS OpenShift Origin Testing
+baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin15/
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
+gpgcheck=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin15-debuginfo]
+name=CentOS OpenShift Origin DebugInfo
+baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin15-source]
+name=CentOS OpenShift Origin Source
+baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin15/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2
new file mode 100644
index 000000000..7050c95f5
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2
@@ -0,0 +1,27 @@
+[centos-openshift-origin36]
+name=CentOS OpenShift Origin
+baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin36/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin36-testing]
+name=CentOS OpenShift Origin Testing
+baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin36/
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
+gpgcheck=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin36-debuginfo]
+name=CentOS OpenShift Origin DebugInfo
+baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin36-source]
+name=CentOS OpenShift Origin Source
+baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin36/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py
new file mode 100644
index 000000000..d42c9bdb9
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py
@@ -0,0 +1,25 @@
+'''
+ OpenShift Logging filters that provide helpers used by the logging roles.
+
+ This should be removed once map_from_pairs is no longer used in __deprecations_logging.yml.
+'''
+
+
+def map_from_pairs(source, delim="="):
+    ''' Returns a dict parsed from a comma-separated string of delim-delimited pairs '''
+ if source == '':
+ return dict()
+
+ return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ ''' OpenShift Logging Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'map_from_pairs': map_from_pairs
+ }
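
For reference, map_from_pairs turns a comma-separated key=value string into a dict. A quick standalone check of the expected behavior:

    # Standalone check of the map_from_pairs filter defined above.
    def map_from_pairs(source, delim="="):
        if source == '':
            return dict()
        return dict(item.split(delim) for item in source.split(","))

    # A selector string becomes a dict; an empty string becomes {}.
    print(map_from_pairs("region=infra,zone=default"))  # {'region': 'infra', 'zone': 'default'}
    print(map_from_pairs(""))                           # {}
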
diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
new file mode 100644
index 000000000..f61801714
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+
+""" Ansible module to help with setting facts conditionally based on other facts """
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: conditional_set_fact
+
+short_description: Sets a fact only when the referenced source variable is defined
+
+description:
+ - "To avoid constant set_fact & when conditions for each var we can use this"
+
+author:
+ - Eric Wolinetz ewolinet@redhat.com
+'''
+
+
+EXAMPLES = '''
+- name: Conditionally set fact
+  conditional_set_fact:
+    facts: "{{ hostvars[inventory_hostname] }}"
+    vars:
+      fact1: not_defined_variable
+
+- name: Conditionally set facts
+  conditional_set_fact:
+    facts: "{{ hostvars[inventory_hostname] }}"
+    vars:
+      fact1: not_defined_variable
+      fact2: defined_variable
+
+'''
+
+
+def run_module():
+ """ The body of the module, we check if the variable name specified as the value
+ for the key is defined. If it is then we use that value as for the original key """
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ facts=dict(type='dict', required=True),
+            vars=dict(required=False, type='dict', default={})
+ ),
+ supports_check_mode=True
+ )
+
+ local_facts = dict()
+ is_changed = False
+
+ for param in module.params['vars']:
+ other_var = module.params['vars'][param]
+
+ if other_var in module.params['facts']:
+ local_facts[param] = module.params['facts'][other_var]
+ if not is_changed:
+ is_changed = True
+
+    module.exit_json(changed=is_changed,
+                     ansible_facts=local_facts)
+
+
+def main():
+ """ main """
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
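
The module's core loop copies a fact only when the deprecated source variable actually exists in the supplied facts. A standalone sketch of that lookup, with hypothetical variable values:

    # Sketch of the conditional_set_fact lookup; the values here are hypothetical.
    facts = {'openshift_hosted_logging_hostname': 'kibana.example.com'}
    mapping = {
        'logging_hostname': 'openshift_hosted_logging_hostname',          # defined -> copied
        'logging_ops_hostname': 'openshift_hosted_logging_ops_hostname',  # undefined -> skipped
    }

    local_facts = {}
    for new_name, old_name in mapping.items():
        if old_name in facts:
            local_facts[new_name] = facts[old_name]

    print(local_facts)  # {'logging_hostname': 'kibana.example.com'}
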
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
new file mode 100644
index 000000000..e534e0cca
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
@@ -0,0 +1,48 @@
+---
+# this maps the deprecated logging variables onto the current variable names
+# this file should be deleted once the deprecated variables are no longer honored
+
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ logging_hostname: openshift_hosted_logging_hostname
+ logging_ops_hostname: openshift_hosted_logging_ops_hostname
+ logging_elasticsearch_cluster_size: openshift_hosted_logging_elasticsearch_cluster_size
+ logging_elasticsearch_ops_cluster_size: openshift_hosted_logging_elasticsearch_ops_cluster_size
+ openshift_logging_storage_kind: openshift_hosted_logging_storage_kind
+ openshift_logging_storage_host: openshift_hosted_logging_storage_host
+ openshift_logging_storage_labels: openshift_hosted_logging_storage_labels
+ openshift_logging_storage_volume_size: openshift_hosted_logging_storage_volume_size
+ openshift_loggingops_storage_kind: openshift_hosted_loggingops_storage_kind
+ openshift_loggingops_storage_host: openshift_hosted_loggingops_storage_host
+ openshift_loggingops_storage_labels: openshift_hosted_loggingops_storage_labels
+ openshift_loggingops_storage_volume_size: openshift_hosted_loggingops_storage_volume_size
+ openshift_logging_use_ops: openshift_hosted_logging_enable_ops_cluster
+ openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret
+ openshift_logging_kibana_hostname: openshift_hosted_logging_hostname
+ openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname
+ openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source
+ openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head
+ openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram
+ openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector
+ openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram
+ openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes
+ openshift_logging_master_public_url: openshift_hosted_logging_master_public_url
+ openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix
+ openshift_logging_image_version: openshift_hosted_logging_deployer_version
+ openshift_logging_install_logging: openshift_hosted_logging_deploy
+
+
+- set_fact:
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size | default('10Gi') if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+ openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size | default('10Gi') if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+ openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
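
The trailing set_fact block collapses the storage kind into per-setting PVC values; the ternaries reduce to the following shape (a sketch with assumed inputs):

    # Sketch of the PVC ternaries above, with assumed inputs.
    def pvc_settings(storage_kind, volume_size=None):
        dynamic = 'true' if storage_kind == 'dynamic' else ''
        size = (volume_size or '10Gi') if storage_kind in ['dynamic', 'nfs'] else ''
        prefix = 'logging-es' if storage_kind == 'dynamic' else ''
        return dynamic, size, prefix

    print(pvc_settings('dynamic'))      # ('true', '10Gi', 'logging-es')
    print(pvc_settings('nfs', '50Gi'))  # ('', '50Gi', '')
    print(pvc_settings(None))           # ('', '', '')
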
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml
new file mode 100644
index 000000000..279646981
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml
@@ -0,0 +1,17 @@
+---
+# this maps the deprecated metrics variables onto the current variable names
+# this file should be deleted once the deprecated variables are no longer honored
+
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ openshift_metrics_storage_access_modes: openshift_hosted_metrics_storage_access_modes
+ openshift_metrics_storage_host: openshift_hosted_metrics_storage_host
+ openshift_metrics_storage_nfs_directory: openshift_hosted_metrics_storage_nfs_directory
+ openshift_metrics_storage_volume_name: openshift_hosted_metrics_storage_volume_name
+ openshift_metrics_storage_volume_size: openshift_hosted_metrics_storage_volume_size
+ openshift_metrics_storage_labels: openshift_hosted_metrics_storage_labels
+ openshift_metrics_image_prefix: openshift_hosted_metrics_deployer_prefix
+ openshift_metrics_image_version: openshift_hosted_metrics_deployer_version
+ openshift_metrics_install_metrics: openshift_hosted_metrics_deploy
+ openshift_metrics_storage_kind: openshift_hosted_metrics_storage_kind
diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
new file mode 100644
index 000000000..94d3acffc
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
@@ -0,0 +1,21 @@
+---
+
+- name: Check for usage of deprecated variables
+ set_fact:
+    __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will no longer be used in the next minor release. Please update your inventory accordingly.']"
+ when:
+ - hostvars[inventory_hostname][item] is defined
+ with_items: "{{ __warn_deprecated_vars }}"
+
+- block:
+ - debug: msg="{{__deprecation_message}}"
+ - pause:
+      seconds: 10
+ when:
+    - __deprecation_message | default('') | length > 0
+
+# with_fileglob resolves paths relative to the role's files/ directory, hence the ../tasks/ prefix (see the sketch after this file)
+- name: Assign deprecated variables to correct counterparts
+ include: "{{ item }}"
+ with_fileglob:
+ - "../tasks/__deprecations_*.yml"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 59ce505d3..e327ee9f5 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,4 +1,8 @@
---
+# Print deprecation warnings before any failure checks so that, if a play fails
+# for other reasons, the user is still made aware of deprecated variables to adjust
+- include: deprecations.yml
+
- name: Abort when conflicting deployment type variables are set
when:
- deployment_type is defined
diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml
index da48e42c1..0fc2372d2 100644
--- a/roles/openshift_sanitize_inventory/vars/main.yml
+++ b/roles/openshift_sanitize_inventory/vars/main.yml
@@ -1,7 +1,62 @@
---
# origin uses community packages named 'origin'
-# online currently uses 'openshift' packages
-# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift'
-# atomic-enterprise uses Red Hat packages named 'atomic-openshift'
-# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
-known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise']
+# openshift-enterprise uses Red Hat packages named 'atomic-openshift'
+known_openshift_deployment_types: ['origin', 'openshift-enterprise']
+
+__deprecation_header: "[DEPRECATION WARNING]:"
+
+# variables that will be deprecated in the next minor release; expect this list to change from release to release
+__warn_deprecated_vars:
+ # logging
+ - 'openshift_hosted_logging_deploy'
+ - 'openshift_hosted_logging_hostname'
+ - 'openshift_hosted_logging_ops_hostname'
+ - 'openshift_hosted_logging_master_public_url'
+ - 'openshift_hosted_logging_elasticsearch_cluster_size'
+ - 'openshift_hosted_logging_elasticsearch_ops_cluster_size'
+ - 'openshift_hosted_logging_image_pull_secret'
+ - 'openshift_hosted_logging_enable_ops_cluster'
+ - 'openshift_hosted_logging_curator_nodeselector'
+ - 'openshift_hosted_logging_curator_ops_nodeselector'
+ - 'openshift_hosted_logging_kibana_nodeselector'
+ - 'openshift_hosted_logging_kibana_ops_nodeselector'
+ - 'openshift_hosted_logging_fluentd_nodeselector_label'
+ - 'openshift_hosted_logging_journal_source'
+ - 'openshift_hosted_logging_journal_read_from_head'
+ - 'openshift_hosted_logging_elasticsearch_instance_ram'
+ - 'openshift_hosted_logging_storage_labels'
+ - 'openshift_hosted_logging_elasticsearch_pvc_dynamic'
+ - 'openshift_hosted_logging_elasticsearch_pvc_size'
+ - 'openshift_hosted_logging_elasticsearch_pvc_prefix'
+ - 'openshift_hosted_logging_elasticsearch_storage_group'
+ - 'openshift_hosted_logging_elasticsearch_nodeselector'
+ - 'openshift_hosted_logging_elasticsearch_ops_instance_ram'
+ - 'openshift_hosted_loggingops_storage_labels'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_dynamic'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_size'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_prefix'
+ - 'openshift_hosted_logging_elasticsearch_ops_nodeselector'
+ - 'openshift_hosted_logging_storage_access_modes'
+ - 'openshift_hosted_logging_storage_kind'
+ - 'openshift_hosted_loggingops_storage_kind'
+ - 'openshift_hosted_logging_storage_host'
+ - 'openshift_hosted_loggingops_storage_host'
+ - 'openshift_hosted_logging_storage_nfs_directory'
+ - 'openshift_hosted_loggingops_storage_nfs_directory'
+ - 'openshift_hosted_logging_storage_volume_name'
+ - 'openshift_hosted_loggingops_storage_volume_name'
+ - 'openshift_hosted_logging_storage_volume_size'
+ - 'openshift_hosted_loggingops_storage_volume_size'
+ - 'openshift_hosted_logging_deployer_prefix'
+ - 'openshift_hosted_logging_deployer_version'
+ # metrics
+ - 'openshift_hosted_metrics_deploy'
+ - 'openshift_hosted_metrics_storage_kind'
+ - 'openshift_hosted_metrics_storage_access_modes'
+ - 'openshift_hosted_metrics_storage_host'
+ - 'openshift_hosted_metrics_storage_nfs_directory'
+ - 'openshift_hosted_metrics_storage_volume_name'
+ - 'openshift_hosted_metrics_storage_volume_size'
+ - 'openshift_hosted_metrics_storage_labels'
+ - 'openshift_hosted_metrics_deployer_prefix'
+ - 'openshift_hosted_metrics_deployer_version'
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
deleted file mode 100644
index 1f25cc39f..000000000
--- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1,2 +0,0 @@
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true;
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 746c73eaf..e202ae173 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -6,8 +6,6 @@
register: mktemp
changed_when: False
-- include: wire_aggregator.yml
-
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
with_first_found:
@@ -25,10 +23,22 @@
name: "kube-service-catalog"
node_selector: ""
-- name: Make kube-service-catalog project network global
- command: >
- oc adm pod-network make-projects-global kube-service-catalog
- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
+- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
+ block:
+ - name: Waiting for netnamespace kube-service-catalog to be ready
+ oc_obj:
+ kind: netnamespace
+ name: kube-service-catalog
+ state: list
+ register: get_output
+      until: get_output.results.stderr is not defined
+ retries: 30
+ delay: 1
+ changed_when: false
+
+ - name: Make kube-service-catalog project network global
+ command: >
+ oc adm pod-network make-projects-global kube-service-catalog
- include: generate_certs.yml
@@ -112,15 +122,6 @@
when:
- not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
-- shell: >
- oc get policybindings/kube-system:default -n kube-system || echo "not found"
- register: get_kube_system
- changed_when: no
-
-- command: >
- oc create policybinding kube-system -n kube-system
- when: "'not found' in get_kube_system.stdout"
-
- oc_adm_policy_user:
namespace: kube-service-catalog
resource_kind: scc
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
deleted file mode 100644
index 1c788470a..000000000
--- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml
+++ /dev/null
@@ -1,198 +0,0 @@
----
-- name: Make temp cert dir
- command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
- register: certtemp
- changed_when: False
-
-- name: Check for First Master Aggregator Signer cert
- stat:
- path: /etc/origin/master/front-proxy-ca.crt
- register: first_proxy_ca_crt
- changed_when: false
- delegate_to: "{{ first_master }}"
-
-- name: Check for First Master Aggregator Signer key
- stat:
- path: /etc/origin/master/front-proxy-ca.crt
- register: first_proxy_ca_key
- changed_when: false
- delegate_to: "{{ first_master }}"
-
-
-# TODO: this currently has a bug where hostnames are required
-- name: Creating First Master Aggregator signer certs
- command: >
- oc adm ca create-signer-cert
- --cert=/etc/origin/master/front-proxy-ca.crt
- --key=/etc/origin/master/front-proxy-ca.key
- --serial=/etc/origin/master/ca.serial.txt
- delegate_to: "{{ first_master }}"
- when:
- - not first_proxy_ca_crt.stat.exists
- - not first_proxy_ca_key.stat.exists
-
-- name: Check for Aggregator Signer cert
- stat:
- path: /etc/origin/master/front-proxy-ca.crt
- register: proxy_ca_crt
- changed_when: false
-
-- name: Check for Aggregator Signer key
- stat:
- path: /etc/origin/master/front-proxy-ca.crt
- register: proxy_ca_key
- changed_when: false
-
-- name: Copy Aggregator Signer certs from first master
- fetch:
- src: "/etc/origin/master/{{ item }}"
- dest: "{{ certtemp.stdout }}/{{ item }}"
- flat: yes
- with_items:
- - front-proxy-ca.crt
- - front-proxy-ca.key
- delegate_to: "{{ first_master }}"
- when:
- - not proxy_ca_key.stat.exists
- - not proxy_ca_crt.stat.exists
-
-- name: Copy Aggregator Signer certs to host
- copy:
- src: "{{ certtemp.stdout }}/{{ item }}"
- dest: "/etc/origin/master/{{ item }}"
- with_items:
- - front-proxy-ca.crt
- - front-proxy-ca.key
- when:
- - not proxy_ca_key.stat.exists
- - not proxy_ca_crt.stat.exists
-
-# oc_adm_ca_server_cert:
-# cert: /etc/origin/master/front-proxy-ca.crt
-# key: /etc/origin/master/front-proxy-ca.key
-
-- name: Check for first master api-client config
- stat:
- path: /etc/origin/master/aggregator-front-proxy.kubeconfig
- register: first_front_proxy_kubeconfig
- delegate_to: "{{ first_master }}"
-
-- name: Create first master api-client config for Aggregator
- command: >
- oc adm create-api-client-config
- --certificate-authority=/etc/origin/master/front-proxy-ca.crt
- --signer-cert=/etc/origin/master/front-proxy-ca.crt
- --signer-key=/etc/origin/master/front-proxy-ca.key
- --user aggregator-front-proxy
- --client-dir=/etc/origin/master
- --signer-serial=/etc/origin/master/ca.serial.txt
- delegate_to: "{{ first_master }}"
- when:
- - not first_front_proxy_kubeconfig.stat.exists
-
-- name: Check for api-client config
- stat:
- path: /etc/origin/master/aggregator-front-proxy.kubeconfig
- register: front_proxy_kubeconfig
-
-- name: Copy api-client config from first master
- fetch:
- src: "/etc/origin/master/{{ item }}"
- dest: "{{ certtemp.stdout }}/{{ item }}"
- flat: yes
- delegate_to: "{{ first_master }}"
- with_items:
- - aggregator-front-proxy.crt
- - aggregator-front-proxy.key
- - aggregator-front-proxy.kubeconfig
- when:
- - not front_proxy_kubeconfig.stat.exists
-
-- name: Copy api-client config to host
- copy:
- src: "{{ certtemp.stdout }}/{{ item }}"
- dest: "/etc/origin/master/{{ item }}"
- with_items:
- - aggregator-front-proxy.crt
- - aggregator-front-proxy.key
- - aggregator-front-proxy.kubeconfig
- when:
- - not front_proxy_kubeconfig.stat.exists
-
-- name: copy tech preview extension file for service console UI
- copy:
- src: openshift-ansible-catalog-console.js
- dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
-- name: Update master config
- yedit:
- state: present
- src: /etc/origin/master/master-config.yaml
- edits:
- - key: aggregatorConfig.proxyClientInfo.certFile
- value: aggregator-front-proxy.crt
- - key: aggregatorConfig.proxyClientInfo.keyFile
- value: aggregator-front-proxy.key
- - key: authConfig.requestHeader.clientCA
- value: front-proxy-ca.crt
- - key: authConfig.requestHeader.clientCommonNames
- value: [aggregator-front-proxy]
- - key: authConfig.requestHeader.usernameHeaders
- value: [X-Remote-User]
- - key: authConfig.requestHeader.groupHeaders
- value: [X-Remote-Group]
- - key: authConfig.requestHeader.extraHeaderPrefixes
- value: [X-Remote-Extra-]
- - key: assetConfig.extensionScripts
- value: [/etc/origin/master/openshift-ansible-catalog-console.js]
- - key: kubernetesMasterConfig.apiServerArguments.runtime-config
- value: [apis/settings.k8s.io/v1alpha1=true]
- - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
- value: DefaultAdmissionConfig
- - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
- value: v1
- - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
- value: false
- register: yedit_output
-
-#restart master serially here
-- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when:
- - yedit_output.changed
- - openshift.master.cluster_method == 'native'
-
-- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
- when:
- - yedit_output.changed
- - openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when:
- - yedit_output.changed
-
-- name: Delete temp directory
- file:
- name: "{{ certtemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index a059745a6..d0bc0e028 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -76,10 +76,11 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
@@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service.
+| openshift_storage_glusterfs_heketi_url           | Undefined               | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address of the heketi service.
| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 0b3d3aef1..148549887 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_is_native: True
openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
@@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
@@ -58,9 +60,9 @@ r_openshift_storage_glusterfs_os_firewall_deny: []
r_openshift_storage_glusterfs_os_firewall_allow:
- service: glusterfs_sshd
port: "2222/tcp"
-- service: glusterfs_daemon
- port: "24007/tcp"
- service: glusterfs_management
+ port: "24007/tcp"
+- service: glusterfs_rdma
port: "24008/tcp"
- service: glusterfs_bricks
port: "49152-49251/tcp"
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
new file mode 100644
index 000000000..9ebb0d5ec
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -0,0 +1,143 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
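
The DaemonSet's nodeSelector uses the ${{NODE_LABELS}} form, which OpenShift templates substitute as a raw (non-string) value, so the default parameter string becomes a real JSON object. A sketch of what the default expands to:

    # Sketch of the ${{NODE_LABELS}} substitution: the parameter's default string
    # is injected as raw JSON, yielding an object rather than a quoted string.
    import json

    node_labels_param = '{ "glusterfs": "storage-host" }'
    node_selector = json.loads(node_labels_param)

    print(node_selector)               # {'glusterfs': 'storage-host'}
    print(node_selector["glusterfs"])  # storage-host
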
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index a43708d71..51724f979 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -15,8 +15,20 @@
oc_project:
state: present
name: "{{ glusterfs_namespace }}"
+ node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}"
when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass
+- name: Add namespace service accounts to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ with_items:
+ - 'default'
+ - 'router'
+ when: glusterfs_is_native or glusterfs_heketi_is_native
+
- name: Delete pre-existing heketi resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 7a2987883..012c722ff 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -5,6 +5,7 @@
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 8c3e31fc9..932d06038 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -55,16 +55,6 @@
- glusterfs_wipe
- item.stdout_lines | count > 0
-- name: Add service accounts to privileged SCC
- oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
- resource_kind: scc
- resource_name: privileged
- state: present
- with_items:
- - 'default'
- - 'router'
-
- name: Label GlusterFS nodes
oc_label:
name: "{{ hostvars[item].openshift.node.nodename }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 17f87578d..1bcab8e49 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -5,6 +5,7 @@
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
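
The template buckets nodes by their optional glusterfs_cluster value (defaulting to '1') before emitting the clusters array; the grouping reduces to this Python sketch (hostnames and cluster ids are made up):

    # Python sketch of the cluster-grouping loop in topology.json.j2;
    # hostnames and cluster ids are made up.
    hostvars = {
        'node1.example.com': {'glusterfs_cluster': '1'},
        'node2.example.com': {'glusterfs_cluster': '2'},
        'node3.example.com': {},  # no explicit cluster -> defaults to '1'
    }

    clusters = {}
    for node, hv in hostvars.items():
        clusters.setdefault(hv.get('glusterfs_cluster', '1'), []).append(node)

    print(clusters)  # {'1': ['node1...', 'node3...'], '2': ['node2...']}
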
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 51f8f4e0e..3047fbaf9 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -31,9 +31,9 @@
group: nfsnobody
with_items:
- "{{ openshift.hosted.registry }}"
- - "{{ openshift.hosted.metrics }}"
- - "{{ openshift.hosted.logging }}"
- - "{{ openshift.hosted.loggingops }}"
+ - "{{ openshift.metrics }}"
+ - "{{ openshift.logging }}"
+ - "{{ openshift.loggingops }}"
- "{{ openshift.hosted.etcd }}"
- name: Configure exports
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 7e8f70b23..0141e0d25 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,5 +1,5 @@
{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }}
-{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }}
-{{ openshift.hosted.loggingops.storage.nfs.directory }}/{{ openshift.hosted.loggingops.storage.volume.name }} {{ openshift.hosted.loggingops.storage.nfs.options }}
+{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
+{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
+{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..53d10f1f8 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,3 @@
---
openshift_protect_installed_version: True
+version_install_base_package: False
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 204abe27e..f4e9ff43a 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,11 +5,15 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
+# This is only needed on masters and nodes; version_install_base_package
+# should be set by a play externally.
- name: Install the base package for versioning
package:
name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: not is_containerized | bool
+ when:
+ - not is_containerized | bool
+ - version_install_base_package | bool
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
@@ -162,7 +166,9 @@
- set_fact:
openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
+ when:
+ - openshift_pkg_version is not defined
+ - openshift_upgrade_target is not defined
- fail:
msg: openshift_version role was unable to set openshift_version
@@ -177,7 +183,10 @@
- fail:
msg: openshift_version role was unable to set openshift_pkg_version
name: Abort if openshift_pkg_version was not set
- when: openshift_pkg_version is not defined
+ when:
+ - openshift_pkg_version is not defined
+ - openshift_upgrade_target is not defined
+
- fail:
msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index a2a579e9d..b727eb74d 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
- name: Set containerized version to configure if openshift_image_tag specified
set_fact:
@@ -22,7 +22,9 @@
command: >
docker run --rm {{ openshift.common.cli_image }}:latest version
register: cli_image_version
- when: openshift_version is not defined
+ when:
+ - openshift_version is not defined
+ - not l_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
- set_fact:
@@ -31,6 +33,7 @@
- openshift_version is not defined
- openshift.common.deployment_type == 'origin'
- cli_image_version.stdout_lines[0].split('-') | length > 1
+ - not l_use_crio_only
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -45,14 +48,14 @@
when:
- openshift_version is defined
- openshift_version.split('.') | length == 2
- - not l_use_crio
+ - not l_use_crio_only
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
when:
- openshift_version is defined
- openshift_version.split('.') | length == 2
- - not l_use_crio
+ - not l_use_crio_only
# TODO: figure out a way to check for the openshift_version when using CRI-O.
# We should do that using the images in the ostree storage so we don't have
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index 0af5abf38..2d74f2e48 100644
--- a/roles/os_firewall/tasks/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -33,7 +33,7 @@
register: result
delegate_to: "{{item}}"
run_once: true
- with_items: "{{ ansible_play_hosts }}"
+ with_items: "{{ ansible_play_batch }}"
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
pause:
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 39d59db70..fa74c9953 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -3,20 +3,17 @@
command: subscription-manager repos --disable="*"
- set_fact:
- default_ose_version: '3.0'
- when: deployment_type == 'enterprise'
-
-- set_fact:
default_ose_version: '3.6'
- when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
+ when: deployment_type == 'openshift-enterprise'
- set_fact:
- ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}"
+ ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}"
- fail:
msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
- when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
- ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] )
+ when:
+ - deployment_type == 'openshift-enterprise'
+ - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']
- name: Enable RHEL repositories
command: subscription-manager repos \
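`lookup('env', ...)` returns an empty string when the variable is unset, so the second argument to `default` (`True`) is what lets the fallback fire on empty values as well as undefined ones. A sketch, with `OSE_VERSION_OVERRIDE` as a stand-in environment variable name:

    # default(..., True) also replaces empty strings, which is what an
    # unset environment variable yields; the variable name is illustrative.
    - set_fact:
        effective_version: "{{ lookup('env', 'OSE_VERSION_OVERRIDE') | default('3.6', True) }}"
    - debug:
        var: effective_version  # "3.6" unless the variable is exported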
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 453044a6e..b06f51908 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -4,10 +4,10 @@
# to make it able to enable repositories
- set_fact:
- rhel_subscription_pool: "{{ lookup('oo_option', 'rhel_subscription_pool') | default(rhsub_pool, True) | default('Red Hat OpenShift Container Platform, Premium*', True) }}"
- rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}"
- rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}"
- rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}"
+ rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}"
+ rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}"
+ rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}"
+ rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}"
- fail:
msg: "This role is only supported for Red Hat hosts"
@@ -41,15 +41,19 @@
redhat_subscription:
username: "{{ rhel_subscription_user }}"
password: "{{ rhel_subscription_pass }}"
+ register: rh_subscription
+ until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_id
+ until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_attached
+ until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
@@ -58,10 +62,12 @@
when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
- name: Attach to OpenShift Pool
- command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
+ command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }}
+ register: subscribe_pool
+ until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
- include: enterprise.yml
when:
- - deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ]
+ - deployment_type == 'openshift-enterprise'
- not ostree_booted.stat.exists | bool
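The `register`/`until` pairs added above make the subscription-manager calls retry on transient failures; when `retries` and `delay` are omitted, Ansible defaults to three attempts five seconds apart. A sketch with the knobs spelled out (the command is a placeholder):

    # `until` re-runs the task until the condition holds or retries run out.
    - name: Retry a flaky external command
      command: /bin/true   # placeholder for a command that may fail transiently
      register: result
      until: result.rc == 0
      retries: 5
      delay: 10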
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
new file mode 100644
index 000000000..fb407c4a2
--- /dev/null
+++ b/roles/template_service_broker/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# default opt-in toggles for this role; override in inventory to act
+template_service_broker_remove: False
+template_service_broker_install: False
diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..b3a3d3428
--- /dev/null
+++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
@@ -0,0 +1 @@
+window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.template_service_broker = true;
diff --git a/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..d0a9f11dc
--- /dev/null
+++ b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js
@@ -0,0 +1,2 @@
+// empty file so that the master-config can still point to a file that exists
+// this file will be replaced by the template service broker role if enabled
diff --git a/roles/etcd_ca/meta/main.yml b/roles/template_service_broker/meta/main.yml
index e3e2f7781..ab5a0cf08 100644
--- a/roles/etcd_ca/meta/main.yml
+++ b/roles/template_service_broker/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: Etcd CA
+ author: OpenShift Red Hat
+ description: OpenShift Template Service Broker
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 2.1
@@ -11,6 +11,3 @@ galaxy_info:
- 7
categories:
- cloud
- - system
-dependencies:
-- role: etcd_common
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
new file mode 100644
index 000000000..f5fd6487c
--- /dev/null
+++ b/roles/template_service_broker/tasks/install.yml
@@ -0,0 +1,77 @@
+---
+# Fact setting
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: set ansible_service_broker facts
+ set_fact:
+ template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}"
+ template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}"
+ template_service_broker_image_name: "{{ template_service_broker_image_name | default(__template_service_broker_image_name) }}"
+
+- oc_project:
+ name: openshift-template-service-broker
+ state: present
+
+- command: mktemp -d /tmp/tsb-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ become: no
+
+- copy:
+ src: "{{ __tsb_files_location }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "{{ __tsb_template_file }}"
+ - "{{ __tsb_rbac_file }}"
+ - "{{ __tsb_broker_file }}"
+
+- name: Apply template file
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" | kubectl apply -f -
+
+# reconcile with rbac
+- name: Reconcile with RBAC file
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | oc auth reconcile -f -
+
+- name: copy tech preview extension file for service console UI
+ copy:
+ src: openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+# Check that the TSB is running
+- name: Verify that TSB is running
+ command: >
+ curl -k https://apiserver.openshift-template-service-broker.svc/healthz
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_health
+ until: api_health.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+
+- set_fact:
+ openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+ when: openshift_master_config_dir is undefined
+
+- slurp:
+ src: "{{ openshift_master_config_dir }}/ca.crt"
+ register: __ca_bundle
+
+# Register with broker
+- name: Register TSB with broker
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | oc apply -f -
+
+- file:
+ state: absent
+ name: "{{ mktemp.stdout }}"
+ changed_when: False
+ become: no
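The health check above shells out to curl (with `warn: no` to quiet Ansible's own hint about it); a sketch of an equivalent poll with the `uri` module, assuming the same in-cluster service name and self-signed certificate:

    # validate_certs: no mirrors curl -k; failed attempts are retried
    # until the body is exactly 'ok' or retries are exhausted.
    - name: Verify that TSB is running
      uri:
        url: https://apiserver.openshift-template-service-broker.svc/healthz
        validate_certs: no
        return_content: yes
      register: api_health
      until: api_health.content == 'ok'
      retries: 120
      delay: 1
      changed_when: false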
diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml
new file mode 100644
index 000000000..d7ca970c7
--- /dev/null
+++ b/roles/template_service_broker/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# preflight assertions would go here; none are needed yet
+
+- include: install.yml
+ when: template_service_broker_install | default(false) | bool
+
+- include: remove.yml
+ when: template_service_broker_remove | default(false) | bool
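Both includes are opt-in, matching the defaults added earlier. A sketch of enabling the install path from inventory group_vars (the values shown are illustrative):

    # Hypothetical group_vars entry; flip the toggles to choose an action.
    template_service_broker_install: true
    template_service_broker_remove: false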
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
new file mode 100644
index 000000000..f3afe65ed
--- /dev/null
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -0,0 +1,35 @@
+---
+- command: mktemp -d /tmp/tsb-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ become: no
+
+- copy:
+ src: "{{ __tsb_files_location }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "{{ __tsb_template_file }}"
+ - "{{ __tsb_broker_file }}"
+
+- name: Delete TSB broker
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete -f -
+
+- name: Delete TSB objects
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | kubectl delete -f -
+
+- name: empty out tech preview extension file for service console UI
+ copy:
+ src: remove-openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+- oc_project:
+ name: openshift-template-service-broker
+ state: absent
+
+- file:
+ state: absent
+ name: "{{ mktemp.stdout }}"
+ changed_when: False
+ become: no
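One caveat shared by install.yml and remove.yml: if an `oc` step fails mid-run, the trailing `file: state=absent` task never executes and the mktemp directory leaks. A sketch of guaranteed cleanup with `block`/`always` (the block body is elided to a placeholder):

    # always: runs whether or not the block's tasks fail, so the
    # temporary directory registered by mktemp is always removed.
    - block:
        - debug:
            msg: "oc process / oc delete steps would go here"
      always:
        - file:
            state: absent
            name: "{{ mktemp.stdout }}"
          changed_when: False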
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
new file mode 100644
index 000000000..77afe1f43
--- /dev/null
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -0,0 +1,4 @@
+---
+__template_service_broker_prefix: "docker.io/openshift/"
+__template_service_broker_version: "latest"
+__template_service_broker_image_name: "origin"
diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml
new file mode 100644
index 000000000..a65340f16
--- /dev/null
+++ b/roles/template_service_broker/vars/main.yml
@@ -0,0 +1,7 @@
+---
+__tsb_files_location: "../../../files/origin-components/"
+
+__tsb_template_file: "apiserver-template.yaml"
+__tsb_config_file: "apiserver-config.yaml"
+__tsb_rbac_file: "rbac-template.yaml"
+__tsb_broker_file: "template-service-broker-registration.yaml"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..dfab1e01b
--- /dev/null
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -0,0 +1,4 @@
+---
+__template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
+__template_service_broker_version: "v3.7"
+__template_service_broker_image_name: "ose"
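With both vars files in place, the `with_first_found` include in install.yml resolves `openshift-enterprise.yml` for enterprise deployments and falls back to `default_images.yml` otherwise. A hypothetical check of the image reference the enterprise file yields:

    # After include_vars picks openshift-enterprise.yml this prints
    # "registry.access.redhat.com/openshift3/ose:v3.7".
    - debug:
        msg: "{{ __template_service_broker_prefix }}{{ __template_service_broker_image_name }}:{{ __template_service_broker_version }}"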