212 files changed, 1366 insertions, 731 deletions
diff --git a/.papr.inventory b/.papr.inventory index aa4324c21..c678e76aa 100644 --- a/.papr.inventory +++ b/.papr.inventory @@ -6,7 +6,7 @@ etcd [OSEv3:vars] ansible_ssh_user=root ansible_python_interpreter=/usr/bin/python3 -deployment_type=origin +openshift_deployment_type=origin openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}" openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io" openshift_check_min_host_disk_gb=1.5 diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 1ca23082d..030e16c5c 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.9.0-0.10.0 ./ +3.9.0-0.15.0 ./ diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile index 0d977d48f..db362bd65 100644 --- a/images/installer/Dockerfile +++ b/images/installer/Dockerfile @@ -10,7 +10,7 @@ COPY images/installer/origin-extra-root / # install ansible and deps RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \ && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \ - && EPEL_PKGS="ansible python2-boto" \ + && EPEL_PKGS="ansible python2-boto google-cloud-sdk-183.0.0 which" \ && yum install -y epel-release \ && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \ && rpm -V $INSTALL_PKGS $EPEL_PKGS \ diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7 index 5245771d0..05df6b43a 100644 --- a/images/installer/Dockerfile.rhel7 +++ b/images/installer/Dockerfile.rhel7 @@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com> USER root # Playbooks, roles, and their dependencies are installed from packages. -RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools" \ +RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \ && yum repolist > /dev/null \ && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \ && yum-config-manager --enable rhel-7-server-rh-common-rpms \ diff --git a/images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo b/images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo new file mode 100644 index 000000000..7bb8502e9 --- /dev/null +++ b/images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo @@ -0,0 +1,8 @@ +[google-cloud-sdk] +name=google-cloud-sdk +baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg diff --git a/inventory/hosts.example b/inventory/hosts.example index d857cd1a7..bc85d1020 100644 --- a/inventory/hosts.example +++ b/inventory/hosts.example @@ -84,6 +84,9 @@ openshift_release=v3.7 # Configure extensions in the master config for console customization # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_templates: +# login: /path/to/login-template.html +# openshift_master_oauth_template is deprecated. Use openshift_master_oauth_templates instead. 
#openshift_master_oauth_template=/path/to/login-template.html # Configure imagePolicyConfig in the master config @@ -941,7 +944,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5} # Enable origin repos that point at Centos PAAS SIG, defaults to true, only used -# by deployment_type=origin +# by openshift_deployment_type=origin #openshift_enable_origin_repo=false # Validity of the auto-generated OpenShift certificates in days. diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 2be8ebd5b..4b2a0ed25 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.9.0 -Release: 0.10.0%{?dist} +Release: 0.15.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -202,6 +202,67 @@ Atomic OpenShift Utilities includes %changelog +* Wed Jan 03 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.15.0 +- + +* Wed Jan 03 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.14.0 +- Cast openshift_docker_use_system_container to bool (mgugino@redhat.com) +- Correct kublet_args cloud-provider directories (mgugino@redhat.com) +- Updating logging_facts to be able to pull values from config maps yaml files, + use diffs to keep custom changes, white list certain settings when creating + diffs (ewolinet@redhat.com) +- Add docker auth credentials to system container install (mgugino@redhat.com) +- Move wait_for_pods to it's own play openshift_hosted (mgugino@redhat.com) +- Remove oauth_template bits from openshift_facts (mgugino@redhat.com) + +* Tue Jan 02 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.13.0 +- Bug 1527178 - installation of logging stack failed: Invalid version specified + for Elasticsearch (nhosoi@redhat.com) +- Remove bootstrap.yml from main.yml in openshift_node role + (mgugino@redhat.com) + +* Tue Jan 02 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.12.0 +- + +* Mon Jan 01 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.11.0 +- aws: Fix misnamed variable in provisioning_vars.yml.example + (mbarnes@fedoraproject.org) +- Fix container_runtime openshift_containerized_host_groups + (mgugino@redhat.com) +- Remove references to deployment_type (mgugino@redhat.com) +- Must directly specify google-cloud-sdk version (ccoleman@redhat.com) +- daemonset config role. (kwoodson@redhat.com) +- Move validate_hosts to prerequisites.yml (mgugino@redhat.com) +- Move sanity_checks into custom action plugin (mgugino@redhat.com) +- Remove openshift.common.{is_atomic|is_containerized} (mgugino@redhat.com) +- Adding support for docker-storage-setup on overlay (kwoodson@redhat.com) +- Add gcloud to the installer image (ccoleman@redhat.com) +- Remove some small items from openshift_facts (mgugino@redhat.com) +- Relocate filter plugins to lib_utils (mgugino@redhat.com) +- Fix hosted_reg_router selectors (mgugino@redhat.com) +- set repos after registration: convert to match task -> import_role model. + (markllama@gmail.com) +- Remove openshift_node_facts role (mgugino@redhat.com) +- Move node group tags to openshift_aws_{master,node}_group. + (abutcher@redhat.com) +- Add CentOS-OpenShift-Origin37 repo template. (abutcher@redhat.com) +- Adding no_log to registry_auth. 
(kwoodson@redhat.com) +- Fix rhel_repos disable command (mazzystr@gmail.com) +- Fix rhel_subscribe boolean (mgugino@redhat.com) +- Move repo and subscribe to prerequisites (mgugino@redhat.com) +- Deprecate using Ansible tests as filters (rteague@redhat.com) +- Removing config trigger for ES DC, updating to use a handler to rollout ES at + the end of a deployment, allowing for override with variable + (ewolinet@redhat.com) +- openshift_logging_{fluentd,mux}_file_buffer_limit mismatch + (nhosoi@redhat.com) +- Update version check to Ansible 2.4.1 (rteague@redhat.com) +- Remove openshift_node_facts part 1 (mgugino@redhat.com) +- Validate node hostname and IP address (rteague@redhat.com) +- Add missing openshift_service_type (mgugino@redhat.com) +- prevent TSB pods from spinning on inappropriate nodes (jminter@redhat.com) +- Add readiness probe to kuryr controller pod (ltomasbo@redhat.com) + * Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.10.0 - Bump requirements.txt to Ansible 2.4.1 (rteague@redhat.com) - Commit to stabalize RHSM operations. This code is derived from contrib diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 584117e6b..0e0e2b425 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -18,9 +18,8 @@ # Since we're not calling openshift_facts we'll do this for now - set_fact: - is_atomic: "{{ ostree_output.rc == 0 }}" - - set_fact: - is_containerized: "{{ is_atomic or containerized | default(false) | bool }}" + openshift_is_atomic: "{{ ostree_output.rc == 0 }}" + openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}" # Stop services on all hosts prior to removing files. - hosts: nodes @@ -133,7 +132,7 @@ when: openshift_use_flannel | default(false) | bool register: result until: result is succeeded - when: not is_atomic | bool + when: not openshift_is_atomic | bool - shell: systemctl reset-failed changed_when: False @@ -363,7 +362,7 @@ - name: Remove packages package: name={{ item }} state=absent - when: not is_atomic | bool and openshift_remove_all | default(True) | bool + when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool with_items: - atomic-openshift - atomic-openshift-clients @@ -487,14 +486,14 @@ - name: Stop additional atomic services service: name={{ item }} state=stopped - when: is_containerized | bool + when: openshift_is_containerized | bool with_items: - etcd_container failed_when: false - name: Remove packages package: name={{ item }} state=absent - when: not is_atomic | bool and openshift_remove_all | default(True) | bool + when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool with_items: - etcd - etcd3 @@ -554,7 +553,7 @@ - name: Remove packages package: name={{ item }} state=absent - when: not is_atomic | bool and openshift_remove_all | default(True) | bool + when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool with_items: - haproxy register: result diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example index 1491fb868..2eb7d23d4 100644 --- a/playbooks/aws/provisioning_vars.yml.example +++ b/playbooks/aws/provisioning_vars.yml.example @@ -46,7 +46,7 @@ openshift_pkg_version: # -3.7.0 # Name of the subnet in the vpc to use. Needs to be set if using a pre-existing # vpc + subnet. 
-#openshift_aws_subnet_name: +#openshift_aws_subnet_az: # -------------- # # Security Group # diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index dc9d0a139..f70f05bac 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -6,7 +6,7 @@ roles: - role: rhel_subscribe when: - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - ansible_distribution == "RedHat" - rhsub_user is defined - rhsub_pass is defined diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml index 23cf8cf76..372a39e74 100644 --- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml +++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml @@ -22,7 +22,7 @@ - name: Create service signer certificate command: > - {{ openshift.common.client_binary }} adm ca create-signer-cert + {{ openshift_client_binary }} adm ca create-signer-cert --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 42cd51bd9..28ddc3ded 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -12,12 +12,9 @@ roles: - openshift_facts tasks: - - set_fact: - repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" - - fail: msg: Cannot upgrade Docker on Atomic operating systems. 
- when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - include_role: name: container_runtime @@ -54,7 +51,7 @@ - name: Drain Node for Kubelet upgrade command: > - {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade register: l_docker_upgrade_drain_result diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 385a141ea..3b47a11e0 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -15,7 +15,7 @@ - "{{ openshift_service_type }}-master-controllers" - "{{ openshift_service_type }}-node" failed_when: false - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Wait for master API to come back online wait_for: diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml index b5000d3a1..54eeb2ef5 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml @@ -10,7 +10,7 @@ - etcd_container - openvswitch failed_when: false - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Check Docker image count shell: "docker images -aq | wc -l" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml index 3fc18c9b7..693ab2d96 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml @@ -5,11 +5,6 @@ hosts: oo_first_master gather_facts: no tasks: - - fail: - msg: > - This upgrade is only supported for origin and openshift-enterprise - deployment types - when: deployment_type not in ['origin','openshift-enterprise'] # Error out in situations where the user has older versions specified in their # inventory in any of the openshift_release, openshift_image_tag, and @@ -71,7 +66,7 @@ local_facts: ha: "{{ groups.oo_masters_to_config | length > 1 }}" - - when: openshift.common.is_containerized | bool + - when: openshift_is_containerized | bool block: - set_fact: master_services: diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 065a9a8ab..b0b5a7e4b 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -15,9 +15,9 @@ docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool -- when: not openshift.common.is_containerized | bool +- 
when: not openshift_is_containerized | bool block: - name: Check latest available OpenShift RPM version repoquery: @@ -49,5 +49,5 @@ fail: msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" when: - - deployment_type == 'origin' + - openshift_deployment_type == 'origin' - openshift.common.version is version_compare(openshift_upgrade_min,'<') diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 0ddccfa98..0263e721d 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -25,7 +25,7 @@ tasks: - name: Upgrade all storage command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=* --confirm register: l_pb_upgrade_control_plane_pre_upgrade_storage when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool @@ -86,7 +86,7 @@ - name: Post master upgrade - Upgrade clusterpolicies storage command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=clusterpolicies --confirm register: l_pb_upgrade_control_plane_post_upgrade_storage when: @@ -133,7 +133,7 @@ tasks: - name: Reconcile Cluster Roles command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --additive-only=true --confirm -o name register: reconcile_cluster_role_result when: openshift_version is version_compare('3.7','<') @@ -144,7 +144,7 @@ - name: Reconcile Cluster Role Bindings command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings --exclude-groups=system:authenticated --exclude-groups=system:authenticated:oauth @@ -160,7 +160,7 @@ - name: Reconcile Jenkins Pipeline Role Bindings command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name + {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name run_once: true register: reconcile_jenkins_role_binding_result changed_when: @@ -214,7 +214,7 @@ - name: Reconcile Security Context Constraints command: > - {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name + {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name register: reconcile_scc_result changed_when: - reconcile_scc_result.stdout != '' @@ -223,7 +223,7 @@ - name: Migrate 
storage post policy reconciliation command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=* --confirm run_once: true register: l_pb_upgrade_control_plane_post_upgrade_storage @@ -262,7 +262,7 @@ - openshift_facts tasks: - include_tasks: docker/tasks/upgrade.yml - when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool + when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool - name: Drain and upgrade master nodes hosts: oo_masters_to_config:&oo_nodes_to_upgrade @@ -291,7 +291,7 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_control_plane_drain_result until: not (l_upgrade_control_plane_drain_result is failed) diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 956ad0d53..ece69a3d5 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -35,7 +35,7 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_nodes_drain_result until: not (l_upgrade_nodes_drain_result is failed) diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml index e8c0f361a..a90082760 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml @@ -43,7 +43,7 @@ tasks: - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index a5ad3801d..d520c6aee 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ 
b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -13,7 +13,7 @@ tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 1498db4c5..a956fdde5 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -20,7 +20,7 @@ tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 6958652d8..4febe76ee 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -15,7 +15,7 @@ tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index c8c87a9c3..49e691352 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -14,7 +14,7 @@ # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO - name: Confirm OpenShift authorization objects are in sync command: > - {{ openshift.common.client_binary }} adm migrate authorization + {{ openshift_client_binary }} adm migrate authorization when: - openshift_currently_installed_version is version_compare('3.7','<') - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml index 67445edeb..d8fc93710 100644 --- a/playbooks/container-runtime/private/config.yml +++ b/playbooks/container-runtime/private/config.yml @@ -1,7 +1,7 @@ --- - hosts: "{{ l_containerized_host_groups }}" vars: - l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}" + l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}" l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}" # role: container_runtime is necessary here to bring role default variables # into the play scope. 
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml index 97226d6b2..54fa5ca66 100644 --- a/playbooks/container-runtime/private/setup_storage.yml +++ b/playbooks/container-runtime/private/setup_storage.yml @@ -1,7 +1,7 @@ --- - hosts: "{{ l_containerized_host_groups }}" vars: - l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}" + l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}" l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}" # role: container_runtime is necessary here to bring role default variables # into the play scope. diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml new file mode 100644 index 000000000..f7007087c --- /dev/null +++ b/playbooks/init/base_packages.yml @@ -0,0 +1,37 @@ +--- +- name: Ensure that all non-node hosts are accessible + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config + any_errors_fatal: true + tasks: + - when: + - not openshift_is_atomic | bool + block: + - name: Ensure openshift-ansible installer package deps are installed + package: + name: "{{ item }}" + state: present + with_items: + - iproute + - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" + - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" + - yum-utils + register: result + until: result is succeeded + + - name: Ensure various deps for running system containers are installed + package: + name: "{{ item }}" + state: present + with_items: + - atomic + - ostree + - runc + when: + - > + (openshift_use_system_containers | default(False)) | bool + or (openshift_use_etcd_system_container | default(False)) | bool + or (openshift_use_openvswitch_system_container | default(False)) | bool + or (openshift_use_node_system_container | default(False)) | bool + or (openshift_use_master_system_container | default(False)) | bool + register: result + until: result is succeeded diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml index 9fec95b17..9e411a551 100644 --- a/playbooks/init/facts.yml +++ b/playbooks/init/facts.yml @@ -21,40 +21,24 @@ path: /run/ostree-booted register: ostree_booted - # Locally setup containerized facts for now - - name: initialize_facts set fact l_is_atomic + # TODO(michaelgugino) remove this line once CI is updated. + - name: set openshift_deployment_type if unset set_fact: - l_is_atomic: "{{ ostree_booted.stat.exists }}" - - - name: initialize_facts set fact for containerized and l_is_*_system_container - set_fact: - l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}" - - # TODO: Should this be moved into health checks?? - # Seems as though any check that happens with a corresponding fail should move into health_checks - - name: Validate python version - ans_dist is fedora and python is v3 - fail: - msg: | - openshift-ansible requires Python 3 for {{ ansible_distribution }}; - For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html + openshift_deployment_type: "{{ deployment_type }}" when: - - ansible_distribution == 'Fedora' - - ansible_python['version']['major'] != 3 + - openshift_deployment_type is undefined + - deployment_type is defined - # TODO: Should this be moved into health checks?? 
- # Seems as though any check that happens with a corresponding fail should move into health_checks - - name: Validate python version - ans_dist not Fedora and python must be v2 - fail: - msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}" - when: - - ansible_distribution != 'Fedora' - - ansible_python['version']['major'] != 2 + - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized + set_fact: + openshift_is_atomic: "{{ ostree_booted.stat.exists }}" + openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}" # TODO: Should this be moved into health checks?? # Seems as though any check that happens with a corresponding fail should move into health_checks # Fail as early as possible if Atomic and old version of Docker - when: - - l_is_atomic | bool + - openshift_is_atomic | bool block: # See https://access.redhat.com/articles/2317361 @@ -72,40 +56,7 @@ - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=') msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host. - - when: - - not l_is_atomic | bool - block: - - name: Ensure openshift-ansible installer package deps are installed - package: - name: "{{ item }}" - state: present - with_items: - - iproute - - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" - - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - - yum-utils - register: result - until: result is succeeded - - - name: Ensure various deps for running system containers are installed - package: - name: "{{ item }}" - state: present - with_items: - - atomic - - ostree - - runc - when: - - > - (openshift_use_system_containers | default(False)) | bool - or (openshift_use_etcd_system_container | default(False)) | bool - or (openshift_use_openvswitch_system_container | default(False)) | bool - or (openshift_use_node_system_container | default(False)) | bool - or (openshift_use_master_system_container | default(False)) | bool - register: result - until: result is succeeded - - - name: Gather Cluster facts and set is_containerized if needed + - name: Gather Cluster facts openshift_facts: role: common local_facts: @@ -113,7 +64,6 @@ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}" hostname: "{{ openshift_hostname | default(None) }}" ip: "{{ openshift_ip | default(None) }}" - is_containerized: "{{ l_is_containerized | default(None) }}" public_hostname: "{{ openshift_public_hostname | default(None) }}" public_ip: "{{ openshift_public_ip | default(None) }}" portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" @@ -141,7 +91,12 @@ local_facts: sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - - name: initialize_facts set_fact repoquery command - set_fact: - repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" - repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}" +- name: Initialize special first-master variables + hosts: oo_first_master + roles: + - role: openshift_facts + tasks: + - set_fact: + # We need to setup openshift_client_binary here for special uses of delegate_to in + # later roles and plays. 
+ first_master_client_binary: "{{ openshift_client_binary }}" diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml index 06e8ba504..20457e508 100644 --- a/playbooks/init/main.yml +++ b/playbooks/init/main.yml @@ -20,9 +20,6 @@ - import_playbook: sanity_checks.yml when: not (skip_sanity_checks | default(False)) -- import_playbook: validate_hostnames.yml - when: not (skip_validate_hostnames | default(False)) - - import_playbook: version.yml when: not (skip_verison | default(False)) diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml index 66786a41a..866c889b6 100644 --- a/playbooks/init/repos.yml +++ b/playbooks/init/repos.yml @@ -8,7 +8,7 @@ name: rhel_subscribe when: - ansible_distribution == 'RedHat' - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - rhsub_user is defined - rhsub_pass is defined - name: initialize openshift repos diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml index 26716a92d..52bcf42c0 100644 --- a/playbooks/init/sanity_checks.yml +++ b/playbooks/init/sanity_checks.yml @@ -1,51 +1,15 @@ --- - name: Verify Requirements - hosts: oo_all_hosts + hosts: oo_first_master + roles: + - role: lib_utils tasks: - - fail: - msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool - - - fail: - msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Nuage sdn can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Contiv can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Contiv can not be used with nuage - when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool - - - fail: - msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both. - when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool - - - fail: - msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Calico can not be used with Contiv in Openshift. 
Set either openshift_use_calico or openshift_use_contiv, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: openshift_hostname must be 63 characters or less - when: openshift_hostname is defined and openshift_hostname | length > 63 - - - fail: - msg: openshift_public_hostname must be 63 characters or less - when: openshift_public_hostname is defined and openshift_public_hostname | length > 63 + # sanity_checks is a custom action plugin defined in lib_utils. + # This module will loop through all the hostvars for each host + # specified in check_hosts. + # Since sanity_checks is an action_plugin, it executes on the control host. + # Thus, sanity_checks cannot gather new information about any hosts. + - name: Run variable sanity checks + sanity_checks: + check_hosts: "{{ groups['oo_all_hosts'] }}" + run_once: True diff --git a/playbooks/openshift-etcd/private/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml index 339fc6b74..d4386249e 100644 --- a/playbooks/openshift-etcd/private/upgrade_image_members.yml +++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml @@ -1,7 +1,7 @@ --- # INPUT etcd_upgrade_version # INPUT etcd_container_version -# INPUT openshift.common.is_containerized +# INPUT openshift_is_containerized - name: Upgrade containerized hosts to {{ etcd_upgrade_version }} hosts: oo_etcd_hosts_to_upgrade serial: 1 @@ -14,4 +14,4 @@ etcd_peer: "{{ openshift.common.hostname }}" when: - etcd_container_version | default('99') is version_compare(etcd_upgrade_version,'<') - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool diff --git a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml index 327a35b09..f7fe6cd9c 100644 --- a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml +++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml @@ -1,7 +1,7 @@ --- # INPUT etcd_upgrade_version # INPUT etcd_rpm_version -# INPUT openshift.common.is_containerized +# INPUT openshift_is_containerized - name: Upgrade to {{ etcd_upgrade_version }} hosts: oo_etcd_hosts_to_upgrade serial: 1 @@ -15,4 +15,4 @@ when: - etcd_rpm_version.stdout | default('99') is version_compare(etcd_upgrade_version, '<') - ansible_distribution == 'RedHat' - - not openshift.common.is_containerized | bool + - not openshift_is_containerized | bool diff --git a/playbooks/openshift-etcd/private/upgrade_step.yml b/playbooks/openshift-etcd/private/upgrade_step.yml index 60127fc68..05c543d62 100644 --- a/playbooks/openshift-etcd/private/upgrade_step.yml +++ b/playbooks/openshift-etcd/private/upgrade_step.yml @@ -61,4 +61,4 @@ etcd_peer: "{{ openshift.common.hostname }}" when: - ansible_distribution == 'Fedora' - - not openshift.common.is_containerized | bool + - not openshift_is_containerized | bool diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md index 107bbfff6..19c381490 100644 --- a/playbooks/openshift-glusterfs/README.md +++ b/playbooks/openshift-glusterfs/README.md @@ -63,7 +63,7 @@ glusterfs [OSEv3:vars] ansible_ssh_user=root -deployment_type=origin +openshift_deployment_type=origin [masters] master diff --git a/playbooks/openshift-hosted/private/config.yml b/playbooks/openshift-hosted/private/config.yml index 036fe654d..4e7b98da2 100644 --- a/playbooks/openshift-hosted/private/config.yml +++ b/playbooks/openshift-hosted/private/config.yml @@ -21,6 +21,10 @@ 
- import_playbook: openshift_hosted_registry.yml +- import_playbook: openshift_hosted_wait_for_pods.yml + +- import_playbook: openshift_hosted_registry_storage.yml + - import_playbook: cockpit-ui.yml - import_playbook: install_docker_gc.yml diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml new file mode 100644 index 000000000..9a407b69e --- /dev/null +++ b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml @@ -0,0 +1,13 @@ +--- +# This playbook waits for registry and router pods after both have been +# created. It is intended to allow the tasks of deploying both to complete +# before polling to save time. +- name: Poll for hosted pod deployments + hosts: oo_first_master + tasks: + - include_role: + name: openshift_hosted + tasks_from: registry_storage.yml + when: + - openshift_hosted_manage_registry | default(True) | bool + - openshift_hosted_registry_registryurl is defined diff --git a/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml new file mode 100644 index 000000000..204cb1781 --- /dev/null +++ b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml @@ -0,0 +1,26 @@ +--- +# This playbook waits for registry and router pods after both have been +# created. It is intended to allow the tasks of deploying both to complete +# before polling to save time. +- name: Poll for hosted pod deployments + hosts: oo_first_master + tasks: + - include_role: + name: openshift_hosted + tasks_from: wait_for_pod.yml + vars: + l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}" + l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}" + when: + - openshift_hosted_manage_router | default(True) | bool + - openshift_hosted_router_registryurl is defined + + - include_role: + name: openshift_hosted + tasks_from: wait_for_pod.yml + vars: + l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}" + l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}" + when: + - openshift_hosted_manage_registry | default(True) | bool + - openshift_hosted_registry_registryurl is defined diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml index 3943720e3..b817221b8 100644 --- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml @@ -17,7 +17,7 @@ - name: Determine if docker-registry exists command: > - {{ openshift.common.client_binary }} get dc/docker-registry -o json + {{ openshift_client_binary }} get dc/docker-registry -o json --config={{ mktemp.stdout }}/admin.kubeconfig -n default register: l_docker_registry_dc @@ -38,7 +38,7 @@ # Replace dc/docker-registry environment variable certificate data if set. 
- name: Update docker-registry environment variables shell: > - {{ openshift.common.client_binary }} env dc/docker-registry + {{ openshift_client_binary }} env dc/docker-registry OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)" OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)" OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)" @@ -62,7 +62,7 @@ - name: Generate registry certificate command: > - {{ openshift.common.client_binary }} adm ca create-server-cert + {{ openshift_client_binary }} adm ca create-server-cert --signer-cert={{ openshift.common.config_base }}/master/ca.crt --signer-key={{ openshift.common.config_base }}/master/ca.key --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt @@ -88,7 +88,7 @@ - name: Redeploy docker registry command: > - {{ openshift.common.client_binary }} deploy dc/docker-registry + {{ openshift_client_binary }} deploy dc/docker-registry --latest --config={{ mktemp.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml index b1f60f1ae..c19147d41 100644 --- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml @@ -17,7 +17,7 @@ - name: Determine if router exists command: > - {{ openshift.common.client_binary }} get dc/router -o json + {{ openshift_client_binary }} get dc/router -o json --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default register: l_router_dc @@ -26,7 +26,7 @@ - name: Determine if router service exists command: > - {{ openshift.common.client_binary }} get svc/router -o json + {{ openshift_client_binary }} get svc/router -o json --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default register: l_router_svc @@ -52,7 +52,7 @@ - name: Update router environment variables shell: > - {{ openshift.common.client_binary }} env dc/router + {{ openshift_client_binary }} env dc/router OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)" OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)" OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)" @@ -78,7 +78,7 @@ - name: Remove router service annotations command: > - {{ openshift.common.client_binary }} annotate service/router + {{ openshift_client_binary }} annotate service/router service.alpha.openshift.io/serving-cert-secret-name- service.alpha.openshift.io/serving-cert-signed-by- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig @@ -86,7 +86,7 @@ - name: Add serving-cert-secret annotation to router service command: > - {{ openshift.common.client_binary }} annotate service/router + {{ openshift_client_binary }} annotate service/router service.alpha.openshift.io/serving-cert-secret-name=router-certs --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default @@ -129,7 +129,7 @@ - name: Redeploy router command: > - {{ openshift.common.client_binary }} deploy dc/router + {{ openshift_client_binary }} deploy dc/router --latest --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml index a90cd6b22..85be0e600 100644 --- a/playbooks/openshift-master/private/additional_config.yml +++ b/playbooks/openshift-master/private/additional_config.yml @@ -30,8 +30,8 @@ when: 
openshift_use_manageiq | default(true) | bool - role: cockpit when: - - not openshift.common.is_atomic | bool - - deployment_type == 'openshift-enterprise' + - not openshift_is_atomic | bool + - openshift_deployment_type == 'openshift-enterprise' - osm_use_cockpit is undefined or osm_use_cockpit | bool - openshift.common.deployment_subtype != 'registry' - role: flannel_register diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml index 3093444b4..e53a6f093 100644 --- a/playbooks/openshift-master/private/config.yml +++ b/playbooks/openshift-master/private/config.yml @@ -47,7 +47,7 @@ state: absent when: - rpmgenerated_config.stat.exists == true - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' with_items: - master - node diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml index 4f55d5c82..59e2b515c 100644 --- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml +++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml @@ -21,7 +21,7 @@ # TODO: this currently has a bug where hostnames are required - name: Creating First Master Aggregator signer certs command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm ca create-signer-cert --cert=/etc/origin/master/front-proxy-ca.crt --key=/etc/origin/master/front-proxy-ca.key --serial=/etc/origin/master/ca.serial.txt @@ -84,7 +84,7 @@ - block: - name: Create first master api-client config for Aggregator command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm create-api-client-config --certificate-authority=/etc/origin/master/front-proxy-ca.crt --signer-cert=/etc/origin/master/front-proxy-ca.crt --signer-key=/etc/origin/master/front-proxy-ca.key diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml index 6b517197d..c0ddcd926 100644 --- a/playbooks/openshift-node/private/image_prep.yml +++ b/playbooks/openshift-node/private/image_prep.yml @@ -12,6 +12,13 @@ - name: run node config import_playbook: configure_nodes.yml +- name: node bootstrap config + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + tasks: + - include_role: + name: openshift_node + tasks_from: bootstrap.yml + - name: Re-enable excluders import_playbook: enable_excluders.yml diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml index c2092b23c..7249ced70 100644 --- a/playbooks/openshift-node/private/restart.yml +++ b/playbooks/openshift-node/private/restart.yml @@ -28,7 +28,7 @@ - "{{ openshift_service_type }}-master-controllers" - "{{ openshift_service_type }}-node" failed_when: false - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Wait for master API to come back online wait_for: diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml index 541913aef..802dce37e 100644 --- a/playbooks/openshift-node/private/setup.yml +++ b/playbooks/openshift-node/private/setup.yml @@ -21,6 +21,6 @@ when: - hostvars[item].openshift is defined - hostvars[item].openshift.common is defined - - 
hostvars[item].openshift.common.is_containerized | bool + - hostvars[item].openshift_is_containerized | bool - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) changed_when: False diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml index 68d7f3359..7802f83d9 100644 --- a/playbooks/prerequisites.yml +++ b/playbooks/prerequisites.yml @@ -3,8 +3,13 @@ vars: skip_verison: True +- import_playbook: init/validate_hostnames.yml + when: not (skip_validate_hostnames | default(False)) + - import_playbook: init/repos.yml +- import_playbook: init/base_packages.yml + # This is required for container runtime for crio, only needs to run once. - name: Configure os_firewall hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index 4ca47d074..ba2f7293b 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -4,7 +4,7 @@ - name: Set default image variables based on deployment type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: set ansible_service_broker facts diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml index 16d960d8b..05415a4d6 100644 --- a/roles/calico_master/tasks/main.yml +++ b/roles/calico_master/tasks/main.yml @@ -19,7 +19,7 @@ - name: Calico Master | Launch Calico Policy Controller command: > - {{ openshift.common.client_binary }} create + {{ openshift_client_binary }} create -f {{ mktemp.stdout }}/calico-policy-controller.yml --config={{ openshift.common.config_base }}/master/admin.kubeconfig register: calico_create_output diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml index 2250fe4cb..372c29c28 100644 --- a/roles/cockpit-ui/meta/main.yml +++ b/roles/cockpit-ui/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info: dependencies: - role: lib_utils - role: lib_openshift +- role: openshift_facts diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml index f60912033..d4174d879 100644 --- a/roles/cockpit-ui/tasks/main.yml +++ b/roles/cockpit-ui/tasks/main.yml @@ -39,7 +39,7 @@ - name: Deploy registry-console command: > - {{ openshift.common.client_binary }} new-app --template=registry-console + {{ openshift_client_binary }} new-app --template=registry-console {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %} {% if openshift_cockpit_deployer_basename is defined %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %} {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %} diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index fc13afed3..577cd7daf 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -10,7 +10,7 @@ - cockpit-bridge - cockpit-docker - "{{ cockpit_plugins }}" - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result until: result is succeeded @@ -19,4 +19,4 @@ name: cockpit.socket enabled: true state: started - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool diff --git 
a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml index 3e4b0c2b8..d0e37e2f4 100644 --- a/roles/container_runtime/defaults/main.yml +++ b/roles/container_runtime/defaults/main.yml @@ -2,8 +2,6 @@ docker_cli_auth_config_path: '/root/.docker' openshift_docker_signature_verification: False -repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" - openshift_docker_alternative_creds: False # oreg_url is defined by user input. @@ -13,7 +11,7 @@ oreg_auth_credentials_replace: False openshift_docker_use_system_container: False openshift_docker_disable_push_dockerhub: False # bool openshift_docker_selinux_enabled: True -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" openshift_docker_hosted_registry_insecure: False # bool diff --git a/roles/container_runtime/meta/main.yml b/roles/container_runtime/meta/main.yml index 5c4c569de..3bc2607fb 100644 --- a/roles/container_runtime/meta/main.yml +++ b/roles/container_runtime/meta/main.yml @@ -12,3 +12,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_utils +- role: openshift_facts diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml index b41122880..d429047e6 100644 --- a/roles/container_runtime/tasks/common/syscontainer_packages.yml +++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml @@ -4,7 +4,7 @@ package: name: container-selinux state: present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded @@ -13,7 +13,7 @@ package: name: atomic state: present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded @@ -23,6 +23,6 @@ package: name: runc state: present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml index 6731963dd..7831f4c7d 100644 --- a/roles/container_runtime/tasks/docker_upgrade_check.yml +++ b/roles/container_runtime/tasks/docker_upgrade_check.yml @@ -61,14 +61,14 @@ - name: Determine available Docker shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" register: g_atomic_docker_version_result - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - set_fact: l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - fail: msg: This playbook requires access to Docker 1.12 or later when: - - openshift.common.is_atomic | bool + - openshift_is_atomic | bool - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<') diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml index 6604e6ad5..d6e7e7fed 100644 --- a/roles/container_runtime/tasks/package_docker.yml +++ b/roles/container_runtime/tasks/package_docker.yml @@ -3,7 +3,7 @@ - name: Get current installed Docker version command: "{{ repoquery_installed }} 
--qf '%{version}' docker" - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: curr_docker_version retries: 4 until: curr_docker_version is succeeded @@ -20,7 +20,7 @@ name: "docker{{ '-' + docker_version if docker_version is defined else '' }}" state: present when: - - not (openshift.common.is_atomic | bool) + - not (openshift_is_atomic | bool) - not (curr_docker_version is skipped) - not (curr_docker_version.stdout != '') register: result diff --git a/roles/container_runtime/tasks/registry_auth.yml b/roles/container_runtime/tasks/registry_auth.yml index 2c7bc5711..4f1abd59a 100644 --- a/roles/container_runtime/tasks/registry_auth.yml +++ b/roles/container_runtime/tasks/registry_auth.yml @@ -15,6 +15,7 @@ - not openshift_docker_alternative_creds | bool - oreg_auth_user is defined - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + no_log: True # docker_creds is a custom module from lib_utils # 'docker login' requires a docker.service running on the local host, this is an @@ -30,3 +31,4 @@ - openshift_docker_alternative_creds | bool - oreg_auth_user is defined - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + no_log: True diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml index 61f122f3c..6a195a938 100644 --- a/roles/container_runtime/tasks/systemcontainer_crio.yml +++ b/roles/container_runtime/tasks/systemcontainer_crio.yml @@ -3,7 +3,7 @@ - name: Check we are not using node as a Docker container with CRI-O fail: msg='Cannot use CRI-O with node configured as a Docker container' when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - not l_is_node_system_container | bool - include_tasks: common/pre.yml diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml index 639585367..dc0452553 100644 --- a/roles/container_runtime/tasks/systemcontainer_docker.yml +++ b/roles/container_runtime/tasks/systemcontainer_docker.yml @@ -18,7 +18,7 @@ # Make sure Docker is installed so we are able to use the client - name: Install Docker so we can use the client package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml index aa976d921..0825af8a5 100644 --- a/roles/contiv/defaults/main.yml +++ b/roles/contiv/defaults/main.yml @@ -101,7 +101,6 @@ apic_epg_bridge_domain: not_specified apic_configure_default_policy: false apic_default_external_contract: "uni/tn-common/brc-default" apic_default_app_profile: "contiv-infra-app-profile" -is_atomic: False kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master" master_name: "{{ groups['masters'][0] }}" contiv_etcd_port: 22379 @@ -120,4 +119,4 @@ contiv_h1_gw_default: "10.129.0.1" # contiv default private subnet for ext access contiv_private_ext_subnet: "10.130.0.0/16" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" diff --git a/roles/contiv/tasks/packageManagerInstall.yml 
b/roles/contiv/tasks/packageManagerInstall.yml
index d5726476c..3367844a8 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -5,7 +5,7 @@
 - include_tasks: pkgMgrInstallers/centos-install.yml
   when: (ansible_os_family == "RedHat") and
-    not is_atomic
+    not openshift_is_atomic
 
 - name: Package Manager | Set fact saying we did CentOS package install
   set_fact:
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
index 3267a4ab0..ced04759d 100644
--- a/roles/contiv_facts/tasks/main.yml
+++ b/roles/contiv_facts/tasks/main.yml
@@ -1,19 +1,4 @@
 ---
-- name: Determine if Atomic
-  stat: path=/run/ostree-booted
-  register: s
-  changed_when: false
-  check_mode: no
-
-- name: Init the is_atomic fact
-  set_fact:
-    is_atomic: false
-
-- name: Set the is_atomic fact
-  set_fact:
-    is_atomic: true
-  when: s.stat.exists
-
 - name: Determine if CoreOS
   raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
   register: distro
@@ -85,4 +70,4 @@
   when: has_rpm
 
 - include_tasks: fedora-install.yml
-  when: not is_atomic and ansible_distribution == "Fedora"
+  when: not openshift_is_atomic and ansible_distribution == "Fedora"
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 86cea5c46..87e249642 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -5,7 +5,7 @@ r_etcd_common_backup_sufix_name: ''
 l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
 
 # runc, docker, host
-r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if openshift_is_containerized else 'host' }}"
 r_etcd_common_embedded_etcd: false
 
 osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
@@ -98,4 +98,4 @@ r_etcd_os_firewall_allow:
 # set the backend quota to 4GB by default
 etcd_quota_backend_bytes: 4294967296
 
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index ccfd9da14..881a8c270 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -1,7 +1,7 @@
 ---
 - name: Install etcd for etcdctl
   package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index a4b0ff31d..3d945344c 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
 - name: Re-introduce leases (as a replacement for key TTLs)
   command: >
-    {{ openshift.common.client_binary }} adm migrate etcd-ttl \
+    {{ openshift_client_binary }} adm migrate etcd-ttl \
     --cert {{ r_etcd_common_master_peer_cert_file }} \
     --key {{ r_etcd_common_master_peer_key_file }} \
     --cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/etcd/tasks/migration/migrate.yml b/roles/etcd/tasks/migration/migrate.yml
index 54a9c74ff..847b1d722 100644
--- a/roles/etcd/tasks/migration/migrate.yml
+++ b/roles/etcd/tasks/migration/migrate.yml
@@ -1,7 +1,7 @@
 ---
 # Should this be run in a serial manner?
 - set_fact:
-    l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+    l_etcd_service: "{{ 'etcd_container' if openshift_is_containerized else 'etcd' }}"
 
 - name: Migrate etcd data
   command: >
diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml
index fe1e418d8..ab3626cec 100644
--- a/roles/etcd/tasks/version_detect.yml
+++ b/roles/etcd/tasks/version_detect.yml
@@ -12,7 +12,7 @@
   - debug:
       msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
     when:
-    - not openshift.common.is_containerized | bool
+    - not openshift_is_containerized | bool
 
 - block:
   - name: Record containerized etcd version (docker)
@@ -52,4 +52,4 @@
   - debug:
       msg: "Etcd containerized version {{ etcd_container_version }} detected"
     when:
-    - openshift.common.is_containerized | bool
+    - openshift_is_containerized | bool
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 2e4a0dc39..d9e4d2354 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -6,4 +6,4 @@ etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt"
 etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt"
 etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key"
 
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index 9b9250f31..4627bf69c 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Install flannel
   become: yes
   package: name=flannel state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/lib_openshift/library/conditional_set_fact.py
index f61801714..363399f33 100644
--- a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
+++ b/roles/lib_openshift/library/conditional_set_fact.py
@@ -29,6 +29,10 @@ EXAMPLES = '''
     fact1: not_defined_variable
     fact2: defined_variable
 
+- name: Conditionally set fact falling back on default
+  conditional_set_fact:
+    fact1: not_defined_var | defined_variable
+
 '''
@@ -48,12 +52,14 @@ def run_module():
     is_changed = False
 
     for param in module.params['vars']:
-        other_var = module.params['vars'][param]
-
-        if other_var in module.params['facts']:
-            local_facts[param] = module.params['facts'][other_var]
-            if not is_changed:
-                is_changed = True
+        other_vars = module.params['vars'][param].replace(" ", "")
+
+        for other_var in other_vars.split('|'):
+            if other_var in module.params['facts']:
+                local_facts[param] = module.params['facts'][other_var]
+                if not is_changed:
+                    is_changed = True
+                break
 
     return module.exit_json(changed=is_changed,  # noqa: F405
                             ansible_facts=local_facts)
diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py
new file mode 100644
index 000000000..1bf332678
--- /dev/null
+++ b/roles/lib_utils/action_plugins/sanity_checks.py
@@ -0,0 +1,126 @@
+"""
+Ansible action plugin to ensure inventory variables are set
+appropriately and no conflicting options have been provided.
+"""
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+# Valid values for openshift_deployment_type
+VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')
+
+# Tuple of variable names and default values if undefined.
+NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
+                   ('openshift_use_flannel', False),
+                   ('openshift_use_nuage', False),
+                   ('openshift_use_contiv', False),
+                   ('openshift_use_calico', False))
+
+
+def to_bool(var_to_check):
+    """Determine a boolean value given the multiple
+       ways bools can be specified in ansible."""
+    # http://yaml.org/type/bool.html
+    yes_list = (True, 1, "True", "1", "true", "TRUE",
+                "Yes", "yes", "Y", "y", "YES",
+                "on", "ON", "On")
+    return var_to_check in yes_list
+
+
+class ActionModule(ActionBase):
+    """Action plugin to execute sanity checks."""
+    def template_var(self, hostvars, host, varname):
+        """Retrieve a variable from hostvars and template it.
+           If undefined, return None type."""
+        res = hostvars[host].get(varname)
+        if res is None:
+            return None
+        return self._templar.template(res)
+
+    def check_openshift_deployment_type(self, hostvars, host):
+        """Ensure a valid openshift_deployment_type is set"""
+        openshift_deployment_type = self.template_var(hostvars, host,
+                                                      'openshift_deployment_type')
+        if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES:
+            type_strings = ", ".join(VALID_DEPLOYMENT_TYPES)
+            msg = "openshift_deployment_type must be defined and one of {}".format(type_strings)
+            raise errors.AnsibleModuleError(msg)
+
+    def check_python_version(self, hostvars, host, distro):
+        """Ensure python version is 3 for Fedora and python 2 for others"""
+        ansible_python = self.template_var(hostvars, host, 'ansible_python')
+        if distro == "Fedora":
+            if ansible_python['version']['major'] != 3:
+                msg = "openshift-ansible requires Python 3 for {};".format(distro)
+                msg += " For information on enabling Python 3 with Ansible,"
+                msg += " see https://docs.ansible.com/ansible/python_3_support.html"
+                raise errors.AnsibleModuleError(msg)
+        else:
+            if ansible_python['version']['major'] != 2:
+                msg = "openshift-ansible requires Python 2 for {};".format(distro)
+                raise errors.AnsibleModuleError(msg)
+
+    def network_plugin_check(self, hostvars, host):
+        """Ensure only one type of network plugin is enabled"""
+        res = []
+        # Loop through each possible network plugin boolean, determine the
+        # actual boolean value, and append results into a list.
+        for plugin, default_val in NET_PLUGIN_LIST:
+            res_temp = self.template_var(hostvars, host, plugin)
+            if res_temp is None:
+                res_temp = default_val
+            res.append(to_bool(res_temp))
+
+        if sum(res) != 1:
+            plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))
+
+            msg = "Host Checked: {} Only one of the following may be true. Found: {}".format(host, plugin_str)
+            raise errors.AnsibleModuleError(msg)
+
+    def check_hostname_vars(self, hostvars, host):
+        """Checks to ensure openshift_hostname
+           and openshift_public_hostname
+           conform to the proper length of 63 characters or less"""
+        for varname in ('openshift_public_hostname', 'openshift_hostname'):
+            var_value = self.template_var(hostvars, host, varname)
+            if var_value and len(var_value) > 63:
+                msg = '{} must be 63 characters or less'.format(varname)
+                raise errors.AnsibleModuleError(msg)
+
+    def run_checks(self, hostvars, host):
+        """Execute the hostvars validations against host"""
+        distro = self.template_var(hostvars, host, 'ansible_distribution')
+        self.check_openshift_deployment_type(hostvars, host)
+        self.check_python_version(hostvars, host, distro)
+        self.network_plugin_check(hostvars, host)
+        self.check_hostname_vars(hostvars, host)
+
+    def run(self, tmp=None, task_vars=None):
+        result = super(ActionModule, self).run(tmp, task_vars)
+
+        # self.task_vars holds all in-scope variables.
+        # Ignore setting self.task_vars outside of init.
+        # pylint: disable=W0201
+        self.task_vars = task_vars or {}
+
+        # self._task.args holds task parameters.
+        # check_hosts is a parameter to this plugin, and should provide
+        # a list of hosts.
+        check_hosts = self._task.args.get('check_hosts')
+        if not check_hosts:
+            msg = "check_hosts is required"
+            raise errors.AnsibleModuleError(msg)
+
+        # We need to access each host's variables
+        hostvars = self.task_vars.get('hostvars')
+        if not hostvars:
+            msg = "hostvars is required"
+            raise errors.AnsibleModuleError(msg)
+
+        # We loop through each host in the provided list check_hosts
+        for host in check_hosts:
+            self.run_checks(hostvars, host)
+
+        result["changed"] = False
+        result["failed"] = False
+        result["msg"] = "Sanity Checks passed"
+
+        return result
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 677f206ea..50ad7e373 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: nickhammond.logrotate | Install logrotate
   package: name=logrotate state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml
index d96d0d802..cb7844bc5 100644
--- a/roles/nuage_ca/tasks/main.yaml
+++ b/roles/nuage_ca/tasks/main.yaml
@@ -1,7 +1,7 @@
 ---
 - name: Install openssl
   package: name=openssl state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
diff --git a/roles/nuage_common/tasks/main.yml b/roles/nuage_common/tasks/main.yml
index 6c8c9f8d2..ec42518ff 100644
--- a/roles/nuage_common/tasks/main.yml
+++ b/roles/nuage_common/tasks/main.yml
@@ -2,17 +2,17 @@
 - name: Set the Nuage plugin openshift directory fact to handle Atomic host install
   set_fact:
     nuage_node_plugin_dir: /var/usr/share/vsp-openshift
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage CNI network config directory fact to handle Atomic host install
   set_fact:
     nuage_node_cni_netconf_dir: /var/etc/cni/net.d/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage CNI binary directory fact to handle Atomic host install
   set_fact:
     nuage_node_cni_bin_dir: /var/opt/cni/bin/
-  when: openshift.common.is_atomic | bool
+  when:
openshift_is_atomic | bool - name: Assure CNI plugin config dir exists before daemon set install become: yes diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml index c264427de..29e16b6f8 100644 --- a/roles/nuage_master/tasks/main.yaml +++ b/roles/nuage_master/tasks/main.yaml @@ -5,22 +5,22 @@ - name: Set the Nuage certificate directory fact for Atomic hosts set_fact: cert_output_dir: /var/usr/share/nuage-openshift-monitor - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage kubeconfig file path fact for Atomic hosts set_fact: kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage monitor yaml location fact for Atomic hosts set_fact: kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage monitor certs location fact for Atomic hosts set_fact: nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/ - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage master config directory for daemon sets install set_fact: @@ -35,27 +35,27 @@ - name: Set the Nuage CNI plugin binary directory for daemon sets install set_fact: nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create directory /usr/share/nuage-openshift-monitor become: yes file: path=/usr/share/nuage-openshift-monitor state=directory - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool - name: Create directory /var/usr/share/nuage-openshift-monitor become: yes file: path=/var/usr/share/nuage-openshift-monitor state=directory - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create directory /var/usr/bin for monitor binary on atomic become: yes file: path=/var/usr/bin state=directory - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create CNI bin directory /var/opt/cni/bin become: yes file: path=/var/opt/cni/bin state=directory - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create the log directory become: yes diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml index fbf2c4f8d..9127b33d6 100644 --- a/roles/nuage_master/tasks/serviceaccount.yml +++ b/roles/nuage_master/tasks/serviceaccount.yml @@ -19,7 +19,7 @@ - name: Generate the node client config command: > - {{ openshift.common.client_binary }} adm create-api-client-config + {{ openshift_client_binary }} adm create-api-client-config --certificate-authority={{ openshift_master_ca_cert }} --client-dir={{ cert_output_dir }} --master={{ openshift.master.api_url }} diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml index c6b7a9b10..1f1bd1653 100644 --- a/roles/nuage_node/tasks/main.yaml +++ b/roles/nuage_node/tasks/main.yaml @@ -2,17 +2,17 @@ - name: Set the Nuage plugin openshift directory fact for Atomic hosts set_fact: vsp_openshift_dir: /var/usr/share/vsp-openshift - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Set the Nuage CNI binary directory fact for Atomic hosts set_fact: cni_bin_dir: /var/opt/cni/bin/ - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: 
Set the Nuage plugin certs directory fact for Atomic hosts set_fact: nuage_plugin_crt_dir: /var/usr/share/vsp-openshift - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Assure CNI conf dir exists become: yes @@ -36,7 +36,7 @@ - name: Add additional Docker mounts for Nuage for atomic hosts become: yes lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}" - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Restart node services command: /bin/true diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index 358b8528f..b94cd9fba 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -11,7 +11,7 @@ package: name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" state: present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: install_result until: install_result is succeeded delegate_to: "{{ openshift_ca_host }}" @@ -87,7 +87,7 @@ # This should NOT replace the CA due to --overwrite=false when a CA already exists. - name: Create the master certificates if they do not already exist command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-master-certs + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-master-certs {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} @@ -137,7 +137,7 @@ - name: Test local loopback context command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} config view --config={{ openshift_master_loopback_config }} changed_when: false register: loopback_config @@ -154,7 +154,7 @@ register: openshift_ca_loopback_tmpdir - name: Generate the loopback master client config command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config --certificate-authority={{ openshift_ca_cert }} {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 68d82e436..37bed9dbe 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Install clients package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result until: result is succeeded @@ -18,7 +18,7 @@ tag: "{{ openshift_image_tag }}" backend: "docker" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - not l_use_cli_atomic_image | bool - block: @@ -34,7 +34,7 @@ tag: "{{ openshift_image_tag }}" backend: "atomic" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - l_use_cli_atomic_image | bool - name: Reload facts to pick up installed OpenShift version @@ -42,6 +42,6 @@ - name: Install bash completion for oc tools 
package: name=bash-completion state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result until: result is succeeded diff --git a/roles/openshift_daemonset_config/defaults/main.yml b/roles/openshift_daemonset_config/defaults/main.yml new file mode 100644 index 000000000..ebe5671d2 --- /dev/null +++ b/roles/openshift_daemonset_config/defaults/main.yml @@ -0,0 +1,19 @@ +--- +openshift_daemonset_config_namespace: openshift-node +openshift_daemonset_config_daemonset_name: ops-node-config +openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}" +openshift_daemonset_config_node_selector: + config: config +openshift_daemonset_config_sa_name: ops +openshift_daemonset_config_configmap_files: {} +openshift_daemonset_config_configmap_literals: {} +openshift_daemonset_config_monitoring: False +openshift_daemonset_config_interval: 300 +openshift_daemonset_config_script: config.sh +openshift_daemonset_config_secret_name: operations-config-secret +openshift_daemonset_config_secrets: {} +openshift_daemonset_config_runasuser: 0 +openshift_daemonset_config_privileged: True +openshift_daemonset_config_resources: + cpu: 10m + memory: 10Mi diff --git a/roles/openshift_daemonset_config/meta/main.yml b/roles/openshift_daemonset_config/meta/main.yml new file mode 100644 index 000000000..d2bbd2576 --- /dev/null +++ b/roles/openshift_daemonset_config/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- lib_openshift diff --git a/roles/openshift_daemonset_config/tasks/main.yml b/roles/openshift_daemonset_config/tasks/main.yml new file mode 100644 index 000000000..450cc9dca --- /dev/null +++ b/roles/openshift_daemonset_config/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: add a sa + oc_serviceaccount: + name: "{{ openshift_daemonset_config_sa_name }}" + namespace: "{{ openshift_daemonset_config_namespace }}" + +- name: add sa to privileged scc + oc_adm_policy_user: + namespace: "{{ openshift_daemonset_config_namespace }}" + resource_kind: scc + resource_name: privileged + state: present + user: "system:serviceaccount:{{ openshift_daemonset_config_namespace }}:{{ openshift_daemonset_config_sa_name }}" + +- name: copy template to disk + template: + dest: "/tmp/{{ item.name }}" + src: "{{ item.name }}.j2" + with_items: + - name: daemonset.yml + +- name: copy files to disk + copy: + src: "{{ item.key }}" + dest: "{{ item.value }}" + with_dict: "{{ openshift_daemonset_config_configmap_files }}" + +- name: create the namespace + oc_project: + state: present + name: "{{ openshift_daemonset_config_namespace }}" + +- name: lay down secrets + oc_secret: + state: present + name: "{{ openshift_daemonset_config_secret_name }}" + namespace: "{{ openshift_daemonset_config_namespace }}" + delete_after: true + contents: "{{ openshift_daemonset_config_secrets }}" + when: + - openshift_daemonset_config_secrets != {} + +- name: create the configmap + oc_configmap: + state: present + name: "{{ openshift_daemonset_config_configmap_name }}" + namespace: "{{ openshift_daemonset_config_namespace }}" + from_literal: "{{ openshift_daemonset_config_configmap_literals }}" + from_file: "{{ openshift_daemonset_config_configmap_files }}" + +- name: deploy daemonset + oc_obj: + state: present + namespace: "{{ openshift_daemonset_config_namespace }}" # openshift-node?? 
+ name: "{{ openshift_daemonset_config_daemonset_name }}" + kind: daemonset + files: + - /tmp/daemonset.yml diff --git a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 new file mode 100644 index 000000000..9792f6d16 --- /dev/null +++ b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 @@ -0,0 +1,142 @@ +--- +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: {{ openshift_daemonset_config_daemonset_name }} + annotations: + kubernetes.io/description: | + This daemon set manages the operational configuration for a cluster and ensures all nodes have + a concrete set of config in place. It could also use a local ansible run against the /host directory. +spec: + selector: + matchLabels: + app: {{ openshift_daemonset_config_daemonset_name }} + confighosts: ops + ops.openshift.io/role: operations + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: {{ openshift_daemonset_config_daemonset_name }} + confighosts: ops + ops.openshift.io/role: operations + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: +{% if openshift_daemonset_config_node_selector is defined and openshift_daemonset_config_node_selector != {} %} + nodeSelector: {{ openshift_daemonset_config_node_selector | to_json }} +{% endif %} + serviceAccountName: {{ openshift_daemonset_config_sa_name }} + hostNetwork: true + hostPID: true + hostIPC: true + containers: + - name: config + image: centos:7 + env: + - name: RESYNC_INTERVAL + value: "{{ openshift_daemonset_config_interval }}" + command: + - /bin/bash + - -c + - | + #!/bin/sh + set -o errexit + + while true; do + + # execute user defined script + sh /opt/config/{{ openshift_daemonset_config_script }} + + # sleep for ${RESYNC_INTERVAL} minutes, then loop. if we fail Kubelet will restart us again + echo "Success, sleeping for ${RESYNC_INTERVAL}s" + exec sleep ${RESYNC_INTERVAL} + + # Return to perform the config + done + securityContext: + # Must be root to modify host system + runAsUser: {{ openshift_daemonset_config_runasuser }} + # Permission could be reduced by selecting an appropriate SELinux policy that allows + # us to update the named directories + privileged: {{ openshift_daemonset_config_privileged }} + volumeMounts: + # Directory which contains the host volume. 
+ - mountPath: /host + name: host + # Our node configuration + - mountPath: /opt/config + name: config +{% if openshift_daemonset_config_secrets != {} %} + # Our delivered secrets + - mountPath: /opt/secrets + name: secrets +{% endif %} + resources: + requests: + cpu: {{ openshift_daemonset_config_resources.cpu }} + memory: {{ openshift_daemonset_config_resources.memory }} +{% if openshift_daemonset_config_monitoring %} + - name: monitoring + image: openshifttools/oso-centos7-host-monitoring:latest + securityContext: + # Must be root to read content + runAsUser: 0 + privileged: true + + volumeMounts: + - mountPath: /host + name: host + readOnly: true + - mountPath: /etc/localtime + subPath: etc/localtime + name: host + readOnly: true + - mountPath: /sys + subPath: sys + name: host + readOnly: true + - mountPath: /var/run/docker.sock + subPath: var/run/docker.sock + name: host + readOnly: true + - mountPath: /var/run/openvswitch + subPath: var/run/openvswitch + name: host + readOnly: true + - mountPath: /etc/origin + subPath: etc/origin + name: host + readOnly: true + - mountPath: /usr/bin/oc + subPath: usr/bin/oc + name: host + readOnly: true + name: host + readOnly: true + - mountPath: /host/var/cache/yum + subPath: var/cache/yum + name: host + - mountPath: /container_setup/monitoring-config.yml + subPath: monitoring-config.yaml + name: config + - mountPath: /opt/config + name: config + resources: + requests: + cpu: 10m + memory: 10Mi +{% endif %} + volumes: + - name: config + configMap: + name: {{ openshift_daemonset_config_configmap_name }} +{% if openshift_daemonset_config_secrets != {} %} + - name: secrets + secret: + secretName: {{ openshift_daemonset_config_secret_name }} +{% endif %} + - name: host + hostPath: + path: / diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml index 0c072b64a..9e635b34f 100644 --- a/roles/openshift_etcd_facts/vars/main.yml +++ b/roles/openshift_etcd_facts/vars/main.yml @@ -1,6 +1,6 @@ --- -etcd_is_containerized: "{{ openshift.common.is_containerized }}" -etcd_is_atomic: "{{ openshift.common.is_atomic }}" +etcd_is_containerized: "{{ openshift_is_containerized }}" +etcd_is_atomic: "{{ openshift_is_atomic }}" etcd_hostname: "{{ openshift.common.hostname }}" etcd_ip: "{{ openshift.common.ip }}" etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}" diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index e623b33f3..0a6e8f20c 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -8,7 +8,7 @@ openshift_examples_load_quickstarts: true content_version: "{{ openshift.common.examples_content_version }}" -examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples" +examples_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/examples" image_streams_base: "{{ examples_base }}/image-streams" centos_image_streams: - "{{ image_streams_base }}/image-streams-centos7.json" diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index 356317431..a09a598bd 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -53,7 +53,7 @@ # RHEL and Centos image streams are mutually exclusive - name: Import RHEL streams command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ 
openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} when: openshift_examples_load_rhel | bool with_items: - "{{ rhel_image_streams }}" @@ -63,7 +63,7 @@ - name: Import Centos Image streams command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} when: openshift_examples_load_centos | bool with_items: - "{{ centos_image_streams }}" @@ -73,7 +73,7 @@ - name: Import db templates command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }} when: openshift_examples_load_db_templates | bool register: oex_import_db_templates failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0" @@ -90,7 +90,7 @@ - "{{ quickstarts_base }}/django.json" - name: Remove defunct quickstart templates from openshift namespace - command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" + command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" with_items: - nodejs-example - cakephp-example @@ -102,7 +102,7 @@ - name: Import quickstart-templates command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }} when: openshift_examples_load_quickstarts | bool register: oex_import_quickstarts failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" @@ -116,7 +116,7 @@ - "{{ xpaas_templates_base }}/sso70-basic.json" - name: Remove old xPaas templates from openshift namespace - command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" + command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" with_items: - sso70-basic register: oex_delete_old_xpaas_templates @@ -125,7 +125,7 @@ - name: Import xPaas image streams command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }} when: openshift_examples_load_xpaas | bool register: 
oex_import_xpaas_streams failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0" @@ -133,7 +133,7 @@ - name: Import xPaas templates command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }} + {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }} when: openshift_examples_load_xpaas | bool register: oex_import_xpaas_templates failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0" diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml index 12fecaff5..6532d7fe2 100644 --- a/roles/openshift_excluder/tasks/install.yml +++ b/roles/openshift_excluder/tasks/install.yml @@ -1,7 +1,7 @@ --- - when: - - not openshift.common.is_atomic | bool + - not openshift_is_atomic | bool - r_openshift_excluder_install_ran is not defined block: diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml index c7e21ba99..5ae863871 100644 --- a/roles/openshift_expand_partition/tasks/main.yml +++ b/roles/openshift_expand_partition/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Ensure growpart is installed package: name=cloud-utils-growpart state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result until: result is succeeded @@ -10,7 +10,7 @@ register: has_growpart failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout changed_when: false - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Grow the partitions command: "growpart {{oep_drive}} {{oep_partition}}" diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index 804b274a2..980350d14 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -1,8 +1,13 @@ --- +openshift_client_binary: "{{ openshift_is_containerized | ternary('/usr/local/bin/oc', 'oc') }}" + openshift_cli_image_dict: origin: 'openshift/origin' openshift-enterprise: 'openshift3/ose' +repoquery_cmd: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}" +repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}" + openshift_hosted_images_dict: origin: 'openshift/origin-${component}:${version}' openshift-enterprise: 'openshift3/ose-${component}:${version}' diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 6170f15d9..d7c358a2f 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -656,26 +656,6 @@ def set_nodename(facts): return facts -def migrate_oauth_template_facts(facts): - """ - Migrate an old oauth template fact to a newer format if it's present. - - The legacy 'oauth_template' fact was just a filename, and assumed you were - setting the 'login' template. - - The new pluralized 'oauth_templates' fact is a dict mapping the template - name to a filename. 
- - Simplify the code after this by merging the old fact into the new. - """ - if 'master' in facts and 'oauth_template' in facts['master']: - if 'oauth_templates' not in facts['master']: - facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']} - elif 'login' not in facts['master']['oauth_templates']: - facts['master']['oauth_templates']['login'] = facts['master']['oauth_template'] - return facts - - def format_url(use_ssl, hostname, port, path=''): """ Format url based on ssl flag, hostname, port and path @@ -887,7 +867,7 @@ def get_openshift_version(facts): if os.path.isfile('/usr/bin/openshift'): _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405 version = parse_openshift_version(output) - elif 'common' in facts and 'is_containerized' in facts['common']: + else: version = get_container_openshift_version(facts) # Handle containerized masters that have not yet been configured as a node. @@ -1278,36 +1258,7 @@ def set_container_facts_if_unset(facts): dict: the facts dict updated with the generated containerization facts """ - facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') - if 'is_containerized' not in facts['common']: - facts['common']['is_containerized'] = facts['common']['is_atomic'] - - if safe_get_bool(facts['common']['is_containerized']): - facts['common']['client_binary'] = '/usr/local/bin/oc' - - return facts - - -def set_installed_variant_rpm_facts(facts): - """ Set RPM facts of installed variant - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with installed_variant_rpms - """ - installed_rpms = [] - for base_rpm in ['openshift', 'atomic-openshift', 'origin']: - optional_rpms = ['master', 'node', 'clients', 'sdn-ovs'] - variant_rpms = [base_rpm] + \ - ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \ - ['tuned-profiles-%s-node' % base_rpm] - for rpm in variant_rpms: - exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405 - if exit_code == 0: - installed_rpms.append(rpm) - - facts['common']['installed_variant_rpms'] = installed_rpms return facts @@ -1416,7 +1367,6 @@ class OpenShiftFacts(object): facts = merge_facts(facts, local_facts, additive_facts_to_overwrite) - facts = migrate_oauth_template_facts(facts) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) @@ -1430,8 +1380,6 @@ class OpenShiftFacts(object): facts = set_proxy_facts(facts) facts = set_builddefaults_facts(facts) facts = set_buildoverrides_facts(facts) - if not safe_get_bool(facts['common']['is_containerized']): - facts = set_installed_variant_rpm_facts(facts) facts = set_nodename(facts) return dict(openshift=facts) @@ -1459,7 +1407,6 @@ class OpenShiftFacts(object): hostname=hostname, public_hostname=hostname, portal_net='172.30.0.0/16', - client_binary='oc', dns_domain='cluster.local', config_base='/etc/origin') diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 4f91f6bb3..744b79c1a 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -160,7 +160,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): required.add(self._registry_console_image(image_tag, image_info)) # images for containerized components - if self.get_var("openshift", 
"common", "is_containerized"): + if self.get_var("openshift_is_containerized"): components = set() if 'oo_nodes_to_config' in host_groups: components.update(["node", "openvswitch"]) diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py index 8b20ccb49..b56d2092b 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py @@ -20,8 +20,8 @@ class EtcdTraffic(OpenShiftCheck): return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version def run(self): - is_containerized = self.get_var("openshift", "common", "is_containerized") - unit = "etcd_container" if is_containerized else "etcd" + openshift_is_containerized = self.get_var("openshift_is_containerized") + unit = "etcd_container" if openshift_is_containerized else "etcd" log_matchers = [{ "start_regexp": r"Starting Etcd Server", diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py index cfbdea303..567162be1 100644 --- a/roles/openshift_health_checker/openshift_checks/mixins.py +++ b/roles/openshift_health_checker/openshift_checks/mixins.py @@ -10,8 +10,8 @@ class NotContainerizedMixin(object): def is_active(self): """Only run on non-containerized hosts.""" - is_containerized = self.get_var("openshift", "common", "is_containerized") - return super(NotContainerizedMixin, self).is_active() and not is_containerized + openshift_is_containerized = self.get_var("openshift_is_containerized") + return super(NotContainerizedMixin, self).is_active() and not openshift_is_containerized class DockerHostMixin(object): @@ -23,7 +23,7 @@ class DockerHostMixin(object): """Only run on hosts that depend on Docker.""" group_names = set(self.get_var("group_names", default=[])) needs_docker = set(["oo_nodes_to_config"]) - if self.get_var("openshift.common.is_containerized"): + if self.get_var("openshift_is_containerized"): needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"]) return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker)) @@ -33,7 +33,7 @@ class DockerHostMixin(object): (which would not be able to install but should already have them). 
Returns: msg, failed """ - if self.get_var("openshift", "common", "is_atomic"): + if self.get_var("openshift_is_atomic"): return "", False # NOTE: we would use the "package" module but it's actually an action plugin diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index fc333dfd4..9fd6e049d 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -6,13 +6,8 @@ from openshift_checks.docker_image_availability import DockerImageAvailability, @pytest.fixture() def task_vars(): return dict( - openshift=dict( - common=dict( - is_containerized=False, - is_atomic=False, - ), - docker=dict(), - ), + openshift_is_atomic=False, + openshift_is_containerized=False, openshift_service_type='origin', openshift_deployment_type='origin', openshift_image_tag='', @@ -20,7 +15,7 @@ def task_vars(): ) -@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [ +@pytest.mark.parametrize('deployment_type, openshift_is_containerized, group_names, expect_active', [ ("invalid", True, [], False), ("", True, [], False), ("origin", False, [], False), @@ -30,20 +25,20 @@ def task_vars(): ("origin", True, ["nfs"], False), ("openshift-enterprise", True, ["lb"], False), ]) -def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active): +def test_is_active(task_vars, deployment_type, openshift_is_containerized, group_names, expect_active): task_vars['openshift_deployment_type'] = deployment_type - task_vars['openshift']['common']['is_containerized'] = is_containerized + task_vars['openshift_is_containerized'] = openshift_is_containerized task_vars['group_names'] = group_names assert DockerImageAvailability(None, task_vars).is_active() == expect_active -@pytest.mark.parametrize("is_containerized,is_atomic", [ +@pytest.mark.parametrize("openshift_is_containerized,openshift_is_atomic", [ (True, True), (False, False), (True, False), (False, True), ]) -def test_all_images_available_locally(task_vars, is_containerized, is_atomic): +def test_all_images_available_locally(task_vars, openshift_is_containerized, openshift_is_atomic): def execute_module(module_name, module_args, *_): if module_name == "yum": return {} @@ -55,8 +50,8 @@ def test_all_images_available_locally(task_vars, is_containerized, is_atomic): 'images': [module_args['name']], } - task_vars['openshift']['common']['is_containerized'] = is_containerized - task_vars['openshift']['common']['is_atomic'] = is_atomic + task_vars['openshift_is_containerized'] = openshift_is_containerized + task_vars['openshift_is_atomic'] = openshift_is_atomic result = DockerImageAvailability(execute_module, task_vars).run() assert not result.get('failed', False) @@ -172,7 +167,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo assert expect_registries_reached == check.reachable_registries -@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [ +@pytest.mark.parametrize("deployment_type, openshift_is_containerized, groups, oreg_url, expected", [ ( # standard set of stuff required on nodes "origin", False, ['oo_nodes_to_config'], "", set([ @@ -232,14 +227,10 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo ), ]) -def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected): +def 
test_required_images(deployment_type, openshift_is_containerized, groups, oreg_url, expected): task_vars = dict( - openshift=dict( - common=dict( - is_containerized=is_containerized, - is_atomic=False, - ), - ), + openshift_is_containerized=openshift_is_containerized, + openshift_is_atomic=False, openshift_deployment_type=deployment_type, group_names=groups, oreg_url=oreg_url, @@ -287,11 +278,7 @@ def test_registry_console_image(task_vars, expected): def test_containerized_etcd(): task_vars = dict( - openshift=dict( - common=dict( - is_containerized=True, - ), - ), + openshift_is_containerized=True, openshift_deployment_type="origin", group_names=['oo_etcd_to_config'], ) diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py index 8fa68c378..33a5dd90a 100644 --- a/roles/openshift_health_checker/test/docker_storage_test.py +++ b/roles/openshift_health_checker/test/docker_storage_test.py @@ -4,21 +4,21 @@ from openshift_checks import OpenShiftCheckException from openshift_checks.docker_storage import DockerStorage -@pytest.mark.parametrize('is_containerized, group_names, is_active', [ +@pytest.mark.parametrize('openshift_is_containerized, group_names, is_active', [ (False, ["oo_masters_to_config", "oo_etcd_to_config"], False), (False, ["oo_masters_to_config", "oo_nodes_to_config"], True), (True, ["oo_etcd_to_config"], True), ]) -def test_is_active(is_containerized, group_names, is_active): +def test_is_active(openshift_is_containerized, group_names, is_active): task_vars = dict( - openshift=dict(common=dict(is_containerized=is_containerized)), + openshift_is_containerized=openshift_is_containerized, group_names=group_names, ) assert DockerStorage(None, task_vars).is_active() == is_active def non_atomic_task_vars(): - return {"openshift": {"common": {"is_atomic": False}}} + return {"openshift_is_atomic": False} @pytest.mark.parametrize('docker_info, failed, expect_msg', [ diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py index a29dc166b..583c4c8dd 100644 --- a/roles/openshift_health_checker/test/etcd_traffic_test.py +++ b/roles/openshift_health_checker/test/etcd_traffic_test.py @@ -36,9 +36,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words) task_vars = dict( group_names=group_names, - openshift=dict( - common=dict(is_containerized=False), - ), + openshift_is_containerized=False, openshift_service_type="origin" ) @@ -50,15 +48,13 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words) assert result.get("failed", False) == failed -@pytest.mark.parametrize('is_containerized,expected_unit_value', [ +@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [ (False, "etcd"), (True, "etcd_container"), ]) -def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value): +def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value): task_vars = dict( - openshift=dict( - common=dict(is_containerized=is_containerized), - ) + openshift_is_containerized=openshift_is_containerized ) def execute_module(module_name, args, *_): diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py index b1a41ca3c..b5d6f2e95 100644 --- a/roles/openshift_health_checker/test/mixins_test.py +++ b/roles/openshift_health_checker/test/mixins_test.py @@ -10,8 +10,8 @@ class 
NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck): @pytest.mark.parametrize('task_vars,expected', [ - (dict(openshift=dict(common=dict(is_containerized=False))), True), - (dict(openshift=dict(common=dict(is_containerized=True))), False), + (dict(openshift_is_containerized=False), True), + (dict(openshift_is_containerized=True), False), ]) def test_is_active(task_vars, expected): assert NotContainerizedCheck(None, task_vars).is_active() == expected @@ -20,4 +20,4 @@ def test_is_active(task_vars, expected): def test_is_active_missing_task_vars(): with pytest.raises(OpenShiftCheckException) as excinfo: NotContainerizedCheck().is_active() - assert 'is_containerized' in str(excinfo.value) + assert 'openshift_is_containerized' in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index dd98ff4d8..0238f49d5 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -70,7 +70,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): assert result is return_value -@pytest.mark.parametrize('group_names,is_containerized,is_active', [ +@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [ (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs (['oo_masters_to_config'], True, False), @@ -82,9 +82,9 @@ def test_ovs_package_version(openshift_release, expected_ovs_version): (['lb'], False, False), (['nfs'], False, False), ]) -def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active): +def test_ovs_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active): task_vars = dict( group_names=group_names, - openshift=dict(common=dict(is_containerized=is_containerized)), + openshift_is_containerized=openshift_is_containerized, ) assert OvsVersion(None, task_vars).is_active() == is_active diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index a1e6e0879..52740093d 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -3,16 +3,16 @@ import pytest from openshift_checks.package_availability import PackageAvailability -@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [ +@pytest.mark.parametrize('pkg_mgr,openshift_is_containerized,is_active', [ ('yum', False, True), ('yum', True, False), ('dnf', True, False), ('dnf', False, False), ]) -def test_is_active(pkg_mgr, is_containerized, is_active): +def test_is_active(pkg_mgr, openshift_is_containerized, is_active): task_vars = dict( ansible_pkg_mgr=pkg_mgr, - openshift=dict(common=dict(is_containerized=is_containerized)), + openshift_is_containerized=openshift_is_containerized, ) assert PackageAvailability(None, task_vars).is_active() == is_active diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index ea8e02b97..d2916f617 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -99,7 +99,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc assert result == return_value -@pytest.mark.parametrize('group_names,is_containerized,is_active', [ 
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [ (['oo_masters_to_config'], False, True), # ensure check is skipped on containerized installs (['oo_masters_to_config'], True, False), @@ -111,9 +111,9 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc (['lb'], False, False), (['nfs'], False, False), ]) -def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active): +def test_package_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active): task_vars = dict( group_names=group_names, - openshift=dict(common=dict(is_containerized=is_containerized)), + openshift_is_containerized=openshift_is_containerized, ) assert PackageVersion(None, task_vars).is_active() == is_active diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index 429f0c514..22294e3d4 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -1,10 +1,4 @@ --- -- name: Create temp directory for doing work in - command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX - register: mktempHosted - changed_when: False - check_mode: no - - name: setup firewall import_tasks: firewall.yml vars: @@ -132,25 +126,10 @@ edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" +# TODO(michaelgugino) remove this set fact. It is currently necessary due to +# custom module not properly templating variables. - name: setup registry list set_fact: r_openshift_hosted_registry_list: - name: "{{ openshift_hosted_registry_name }}" namespace: "{{ openshift_hosted_registry_namespace }}" - -- name: Wait for pod (Registry) - include_tasks: wait_for_pod.yml - vars: - l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}" - l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}" - -- include_tasks: storage/glusterfs.yml - when: - - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap - -- name: Delete temp directory - file: - name: "{{ mktempHosted.stdout }}" - state: absent - changed_when: False - check_mode: no diff --git a/roles/openshift_hosted/tasks/registry_storage.yml b/roles/openshift_hosted/tasks/registry_storage.yml new file mode 100644 index 000000000..aa66a7867 --- /dev/null +++ b/roles/openshift_hosted/tasks/registry_storage.yml @@ -0,0 +1,4 @@ +--- +- include_tasks: storage/glusterfs.yml + when: + - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 8ecaacb4a..2dc9c98f6 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -98,9 +98,3 @@ ports: "{{ item.ports }}" stats_port: "{{ item.stats_port }}" with_items: "{{ openshift_hosted_routers }}" - -- name: Wait for pod (Routers) - include_tasks: wait_for_pod.yml - vars: - l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}" - l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}" diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml index bd7181c17..77f020357 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml @@ -10,7 
+10,7 @@ dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - name: Create GlusterFS registry service and endpoint - command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}" + command: "{{ openshift_client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}" with_items: - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml" diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml index 056c79334..a14b0febc 100644 --- a/roles/openshift_hosted/tasks/wait_for_pod.yml +++ b/roles/openshift_hosted/tasks/wait_for_pod.yml @@ -3,17 +3,17 @@ block: - name: Ensure OpenShift pod correctly rolls out (best-effort today) command: | - {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ + {{ openshift_client_binary }} rollout status deploymentconfig {{ item.name }} \ --namespace {{ item.namespace | default('default') }} \ --config {{ openshift_master_config_dir }}/admin.kubeconfig async: 600 - poll: 15 + poll: 5 with_items: "{{ l_openshift_hosted_wfp_items }}" failed_when: false - name: Determine the latest version of the OpenShift pod deployment command: | - {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ + {{ openshift_client_binary }} get deploymentconfig {{ item.name }} \ --namespace {{ item.namespace }} \ --config {{ openshift_master_config_dir }}/admin.kubeconfig \ -o jsonpath='{ .status.latestVersion }' @@ -22,14 +22,14 @@ - name: Poll for OpenShift pod deployment success command: | - {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ + {{ openshift_client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ --namespace {{ item.0.namespace }} \ --config {{ openshift_master_config_dir }}/admin.kubeconfig \ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' register: openshift_hosted_wfp_rc_phase until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout" - delay: 15 - retries: 40 + delay: 5 + retries: 60 failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout" with_together: - "{{ l_openshift_hosted_wfp_items }}" diff --git a/roles/openshift_hosted_templates/defaults/main.yml b/roles/openshift_hosted_templates/defaults/main.yml index f4fd15089..48d62c8df 100644 --- a/roles/openshift_hosted_templates/defaults/main.yml +++ b/roles/openshift_hosted_templates/defaults/main.yml @@ -1,5 +1,5 @@ --- -hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" +hosted_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/hosted" hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}" content_version: "{{ openshift.common.examples_content_version }}" diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml index 89b92dfcc..b2313c297 100644 --- a/roles/openshift_hosted_templates/tasks/main.yml +++ b/roles/openshift_hosted_templates/tasks/main.yml @@ -52,7 +52,7 @@ - name: Create or update hosted templates command: > - {{ openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }} + {{ openshift_client_binary }} {{ openshift_hosted_templates_import_command 
}} -f {{ hosted_base }} --config={{ openshift_hosted_templates_kubeconfig }} -n openshift diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index 6ffe3f11e..d8c45fb33 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -32,7 +32,7 @@ r_openshift_loadbalancer_os_firewall_allow: port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" cond: "{{ r_openshift_lb_use_nuage | bool }}" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" # NOTE # r_openshift_lb_use_nuage_default may be defined external to this role. diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index 3ca6c8cbe..4a11029ab 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -4,33 +4,33 @@ - name: Install haproxy package: name=haproxy state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result until: result is succeeded - name: Pull haproxy image command: > docker pull {{ openshift_router_image }}:{{ openshift_image_tag }} - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Create config directory for haproxy file: path: /etc/haproxy state: directory - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Create the systemd unit files template: src: "haproxy.docker.service.j2" dest: "/etc/systemd/system/haproxy.service" - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool notify: restart haproxy - name: Configure systemd service directory for haproxy file: path: /etc/systemd/system/haproxy.service.d state: directory - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool # Work around ini_file create option in 2.2 which defaults to no - name: Create limits.conf file @@ -41,7 +41,7 @@ owner: root group: root changed_when: false - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool - name: Configure the nofile limits for haproxy ini_file: @@ -50,7 +50,7 @@ option: LimitNOFILE value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}" notify: restart haproxy - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool - name: Configure haproxy template: diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 index 24fd635ec..de5a8d7c2 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 @@ -3,7 +3,7 @@ global maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }} log /dev/log local0 info -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %} stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin {% else %} chroot /var/lib/haproxy diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index e1a5ea726..ba412b5a6 100644 --- 
a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -102,6 +102,28 @@ def serviceaccount_namespace(qualified_sa, default=None): return seg[-1] +def flatten_dict(data, parent_key=None): + """ This filter plugin will flatten a dict and its nested dicts into a single dict + """ + if not isinstance(data, dict): + raise RuntimeError("flatten_dict failed, expects to flatten a dict") + + merged = dict() + + for key in data: + if parent_key is not None: + insert_key = '.'.join((parent_key, key)) + else: + insert_key = key + + if isinstance(data[key], dict): + merged.update(flatten_dict(data[key], insert_key)) + else: + merged[insert_key] = data[key] + + return merged + + # pylint: disable=too-few-public-methods class FilterModule(object): ''' OpenShift Logging Filters ''' @@ -117,5 +139,6 @@ class FilterModule(object): 'es_storage': es_storage, 'serviceaccount_name': serviceaccount_name, 'serviceaccount_namespace': serviceaccount_namespace, - 'walk': walk + 'walk': walk, + "flatten_dict": flatten_dict } diff --git a/roles/openshift_logging/library/logging_patch.py b/roles/openshift_logging/library/logging_patch.py new file mode 100644 index 000000000..d2c0bc456 --- /dev/null +++ b/roles/openshift_logging/library/logging_patch.py @@ -0,0 +1,112 @@ +#!/usr/bin/python + +""" Ansible module to help with creating a context patch file with whitelisting for logging """ + +import difflib +import re + +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: logging_patch + +short_description: This will create a context patch file while giving the ability + to whitelist some lines (excluding them from comparison) + +description: + - "To create configmap patches for logging" + +author: + - Eric Wolinetz ewolinet@redhat.com +''' + + +EXAMPLES = ''' +- logging_patch: + original_file: "{{ tempdir }}/current.yml" + new_file: "{{ configmap_new_file }}" + whitelist: "{{ configmap_protected_lines | default([]) }}" + +''' + + +def account_for_whitelist(file_contents, white_list=None): + """ This method will remove lines that contain whitelist values from the content + of the file so that we aren't building a patch based on those lines + + Usage: + + for file_contents: + + index: + number_of_shards: {{ es_number_of_shards | default ('1') }} + number_of_replicas: {{ es_number_of_replicas | default ('0') }} + unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + + + and white_list: + + ['number_of_shards', 'number_of_replicas'] + + + We would end up with: + + index: + unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + + """ + + for line in white_list: + file_contents = re.sub(r".*%s:.*\n" % line, "", file_contents) + + return file_contents + + +def run_module(): + """ The body of the module, we check if the variable name specified as the value + for the key is defined.
If it is then we use that value as for the original key """ + + module = AnsibleModule( + argument_spec=dict( + original_file=dict(type='str', required=True), + new_file=dict(type='str', required=True), + whitelist=dict(required=False, type='list', default=[]) + ), + supports_check_mode=True + ) + + original_fh = open(module.params['original_file'], "r") + original_contents = original_fh.read() + original_fh.close() + + original_contents = account_for_whitelist(original_contents, module.params['whitelist']) + + new_fh = open(module.params['new_file'], "r") + new_contents = new_fh.read() + new_fh.close() + + new_contents = account_for_whitelist(new_contents, module.params['whitelist']) + + uni_diff = difflib.unified_diff(new_contents.splitlines(), + original_contents.splitlines(), + lineterm='') + + return module.exit_json(changed=False, # noqa: F405 + raw_patch="\n".join(uni_diff)) + + +def main(): + """ main """ + run_module() + + +if __name__ == '__main__': + main() diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index 98d0d1c4f..302a9b4c9 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -204,6 +204,14 @@ class OpenshiftLoggingFacts(OCBaseCommand): if comp is not None: self.add_facts_for(comp, "services", name, dict()) + # pylint: disable=too-many-arguments + def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None): + '''Extracts facts in logging namespace from configmap''' + if yaml_file is not None: + config_facts = yaml.load(yaml_file) + self.facts[comp][kind][name][config_key] = config_facts + self.facts[comp][kind][name]["raw"] = yaml_file + def facts_for_configmaps(self, namespace): ''' Gathers facts for configmaps in logging namespace ''' self.default_keys_for("configmaps") @@ -214,7 +222,10 @@ class OpenshiftLoggingFacts(OCBaseCommand): name = item["metadata"]["name"] comp = self.comp(name) if comp is not None: - self.add_facts_for(comp, "configmaps", name, item["data"]) + self.add_facts_for(comp, "configmaps", name, dict(item["data"])) + if comp in ["elasticsearch", "elasticsearch_ops"]: + for config_key in item["data"]: + self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key]) def facts_for_oauthclients(self, namespace): ''' Gathers facts for oauthclients used with logging ''' diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml index 59d6098d4..4a2ee64f0 100644 --- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml +++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml @@ -1,6 +1,6 @@ --- - command: > - {{ openshift.common.client_binary }} + {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }} register: __logging_ops_projects diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index af36d67c6..51d6d0efd 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -109,14 +109,14 @@ # remove annotations added by logging - command: > - {{ openshift.common.client_binary }} + {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get namespaces -o name {{ 
__default_logging_ops_projects | join(' ') }} register: __logging_ops_projects - name: Remove Annotation of Operations Projects command: > - {{ openshift.common.client_binary }} + {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig annotate {{ project }} openshift.io/logging.ui.hostname- with_items: "{{ __logging_ops_projects.stdout_lines }}" diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index d5cfacae3..0d7f8c056 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -17,7 +17,7 @@ - name: Generate certificates command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert + {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test check_mode: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index bb8ebec6b..913478027 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -1,9 +1,12 @@ --- - name: Gather OpenShift Logging Facts openshift_logging_facts: - oc_bin: "{{openshift.common.client_binary}}" + oc_bin: "{{openshift_client_binary}}" openshift_logging_namespace: "{{openshift_logging_namespace}}" +## This is an include rather than an import because we need access to group/inventory variables +- include_tasks: set_defaults_from_current.yml + - name: Set logging project oc_project: state: present diff --git a/roles/openshift_logging/tasks/patch_configmap_file.yaml b/roles/openshift_logging/tasks/patch_configmap_file.yaml new file mode 100644 index 000000000..30087fe6a --- /dev/null +++ b/roles/openshift_logging/tasks/patch_configmap_file.yaml @@ -0,0 +1,35 @@ +--- +## The purpose of this task file is to get a patch that is based on the diff +## between configmap_current_file and configmap_new_file. The module +## logging_patch takes the paths of two files to compare and also a list of +## variables whose lines we exclude from the diff. +## We then patch the new configmap file so that we can build a configmap +## using that file later. We then use oc apply to idempotently modify any +## existing configmap. + +## The following variables are expected to be provided when including this task: +# __configmap_output -- This is provided to us from patch_configmap_files.yaml; +# it is a dict of the configmap where configmap_current_file exists +# configmap_current_file -- The name of the data file in the __configmap_output +# configmap_new_file -- The path to the file that we intend to oc apply later; +# we apply our generated patch to this file.
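The comments above describe the flow the new logging_patch module implements: lines naming protected settings are stripped from both files before a unified diff is taken, so installer-managed values stay out of the patch while the user's other customizations survive the update. A minimal sketch of that flow in plain Python, outside of Ansible (the sample file contents and the protected list are illustrative only):

# Sketch of the whitelist-then-diff flow behind logging_patch (illustrative only).
import difflib
import re

def strip_whitelisted(contents, whitelist):
    # drop any line that mentions a protected setting so it never enters the patch
    for name in whitelist:
        contents = re.sub(r".*%s:.*\n" % name, "", contents)
    return contents

current = "index:\n  number_of_shards: 3\n  unassigned.node_left.delayed_timeout: 5m\n"
new = "index:\n  number_of_shards: 1\n  unassigned.node_left.delayed_timeout: 2m\n"
protected = ["number_of_shards"]

# the diff runs from the new (role-provided) file toward the currently deployed one,
# so applying it back onto the new file re-introduces the user's custom changes
patch = "\n".join(difflib.unified_diff(
    strip_whitelisted(new, protected).splitlines(),
    strip_whitelisted(current, protected).splitlines(),
    lineterm=""))
print(patch)  # this is what gets written to patch.patch and applied with patch -u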
+# configmap_protected_lines -- The list of variables to exclude from the diff + +- copy: + content: "{{ __configmap_output.results.results[0]['data'][configmap_current_file] }}" + dest: "{{ tempdir }}/current.yml" + +- logging_patch: + original_file: "{{ tempdir }}/current.yml" + new_file: "{{ configmap_new_file }}" + whitelist: "{{ configmap_protected_lines | default([]) }}" + register: patch_output + +- copy: + content: "{{ patch_output.raw_patch }}\n" + dest: "{{ tempdir }}/patch.patch" + when: patch_output.raw_patch | length > 0 + +- command: > + patch --force --quiet -u "{{ configmap_new_file }}" "{{ tempdir }}/patch.patch" + when: patch_output.raw_patch | length > 0 diff --git a/roles/openshift_logging/tasks/patch_configmap_files.yaml b/roles/openshift_logging/tasks/patch_configmap_files.yaml new file mode 100644 index 000000000..74a9cc287 --- /dev/null +++ b/roles/openshift_logging/tasks/patch_configmap_files.yaml @@ -0,0 +1,31 @@ +--- +## The purpose of this task file is to take in a list of configmap files provided +## in the variable configmap_file_names, which correspond to the data sections +## within a configmap. We iterate over each of these files and create a patch +## from the diff between current_file and new_file to try to maintain any custom +## changes that a user may have made to a currently deployed configmap while +## trying to idempotently update with any role provided files. + +## The following variables are expected to be provided when including this task: +# configmap_name -- This is the name of the configmap that the files exist in +# configmap_namespace -- The namespace that the configmap lives in +# configmap_file_names -- This is expected to be passed in as a dict +# current_file -- The name of the data entry within the configmap +# new_file -- The file path to the file we are comparing to current_file +# protected_lines -- List of variables whose line will be excluded when creating a diff + +- oc_configmap: + name: "{{ configmap_name }}" + state: list + namespace: "{{ configmap_namespace }}" + register: __configmap_output + +- when: __configmap_output.results.stderr is undefined + include_tasks: patch_configmap_file.yaml + vars: + configmap_current_file: "{{ configmap_files.current_file }}" + configmap_new_file: "{{ configmap_files.new_file }}" + configmap_protected_lines: "{{ configmap_files.protected_lines | default([]) }}" + with_items: "{{ configmap_file_names }}" + loop_control: + loop_var: configmap_files diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml index 00de0ca06..bc817075d 100644 --- a/roles/openshift_logging/tasks/procure_server_certs.yaml +++ b/roles/openshift_logging/tasks/procure_server_certs.yaml @@ -27,7 +27,7 @@ - name: Creating signed server cert and key for {{ cert_info.procure_component }} command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert + {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key --signer-serial={{generated_certs_dir}}/ca.serial.txt diff --git a/roles/openshift_logging/tasks/set_defaults_from_current.yml b/roles/openshift_logging/tasks/set_defaults_from_current.yml 
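The set_defaults_from_current.yml file whose diff begins here ties the flatten_dict filter to conditional_set_fact: the elasticsearch.yml stored in an already-deployed configmap is flattened into dotted keys so settings such as index.number_of_shards can be recovered and used as defaults, while an inventory-supplied value still wins. A rough Python equivalent of that precedence chain (the dict literals stand in for the configmap contents and the inventory; they are illustrative, not actual cluster data):

# Sketch of flattening a deployed elasticsearch.yml and applying inventory-first precedence.
def flatten_dict(data, parent_key=None):
    # nested dicts become dotted keys: {"index": {"number_of_shards": "3"}} -> {"index.number_of_shards": "3"}
    if not isinstance(data, dict):
        raise RuntimeError("flatten_dict failed, expects to flatten a dict")
    merged = {}
    for key, value in data.items():
        insert_key = key if parent_key is None else ".".join((parent_key, key))
        if isinstance(value, dict):
            merged.update(flatten_dict(value, insert_key))
        else:
            merged[insert_key] = value
    return merged

# pretend this came from the logging-elasticsearch configmap's elasticsearch.yml
deployed = flatten_dict({"index": {"number_of_shards": "3", "number_of_replicas": "1"}})

inventory = {}  # e.g. {"openshift_logging_es_number_of_shards": "5"} when the user sets it

# inventory value first, then the value recovered from the configmap, then the role default
shards = inventory.get("openshift_logging_es_number_of_shards",
                       deployed.get("index.number_of_shards", 1))
print(shards)  # "3" here; "5" if set in the inventory; 1 if neither source has it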
new file mode 100644 index 000000000..dde362abe --- /dev/null +++ b/roles/openshift_logging/tasks/set_defaults_from_current.yml @@ -0,0 +1,34 @@ +--- + +## We are pulling default values from configmaps if they exist already +## Using conditional_set_fact allows us to set the value of a variable based on +## the value of another one, if it is already defined. Else we don't set the +## left hand side (it stays undefined as well). + +## conditional_set_fact allows us to specify a fact source, so first we try to +## set variables in the logging-elasticsearch & logging-elasticsearch-ops configmaps +## afterwards we set the value of the variable based on the value in the inventory +## but fall back to using the value from a configmap as a default. If neither is set +## then the variable remains undefined and the role default will be used. + +- conditional_set_fact: + facts: "{{ openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch']['elasticsearch.yml'] | flatten_dict }}" + vars: + __openshift_logging_es_number_of_shards: index.number_of_shards + __openshift_logging_es_number_of_replicas: index.number_of_replicas + when: openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch'] is defined + +- conditional_set_fact: + facts: "{{ openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops']['elasticsearch.yml'] | flatten_dict }}" + vars: + __openshift_logging_es_ops_number_of_shards: index.number_of_shards + __openshift_logging_es_ops_number_of_replicas: index.number_of_replicas + when: openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops'] is defined + +- conditional_set_fact: + facts: "{{ hostvars[inventory_hostname] }}" + vars: + openshift_logging_es_number_of_shards: openshift_logging_es_number_of_shards | __openshift_logging_es_number_of_shards + openshift_logging_es_number_of_replicas: openshift_logging_es_number_of_replicas | __openshift_logging_es_number_of_replicas + openshift_logging_es_ops_number_of_shards: openshift_logging_es_ops_number_of_shards | __openshift_logging_es_ops_number_of_shards + openshift_logging_es_ops_number_of_replicas: openshift_logging_es_ops_number_of_replicas | __openshift_logging_es_ops_number_of_replicas diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index e7ef5ff22..53b464113 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -2,7 +2,7 @@ - name: Set default image variables based on deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -54,14 +54,17 @@ - copy: src: curator.yml dest: "{{ tempdir }}/curator.yml" - when: curator_config_contents is undefined changed_when: no -- copy: - content: "{{ curator_config_contents }}" - dest: "{{ tempdir }}/curator.yml" - when: curator_config_contents is defined - changed_when: no +- include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-curator" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "config.yaml" + new_file: "{{ tempdir }}/curator.yml" - name: Set Curator configmap oc_configmap: diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml index 
95bf462d1..5bee58725 100644 --- a/roles/openshift_logging_curator/vars/main.yml +++ b/roles/openshift_logging_curator/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_curator_version: "3_6" -__allowed_curator_versions: ["3_5", "3_6", "3_7"] +__latest_curator_version: "3_8" +__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8"] diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index e91248d08..9e7646379 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -15,10 +15,10 @@ elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -111,7 +111,7 @@ - name: Create logging-metrics-reader-role command: > - {{ openshift.common.client_binary }} + {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n "{{ openshift_logging_elasticsearch_namespace }}" create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml" @@ -168,33 +168,31 @@ when: es_logging_contents is undefined changed_when: no -- set_fact: - __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}" - __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}" - - template: src: elasticsearch.yml.j2 dest: "{{ tempdir }}/elasticsearch.yml" vars: allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}" - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}" - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}" + es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" + es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}" es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}" when: es_config_contents is undefined changed_when: no -- copy: - content: "{{ es_logging_contents }}" - dest: "{{ tempdir }}/elasticsearch-logging.yml" - when: es_logging_contents is defined - changed_when: no - -- copy: - content: "{{ es_config_contents }}" - dest: "{{ tempdir }}/elasticsearch.yml" - when: es_config_contents is defined - changed_when: no +# create diff between current configmap files and our current files +- include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-elasticsearch" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "elasticsearch.yml" + new_file: "{{ tempdir }}/elasticsearch.yml" + protected_lines: ["number_of_shards", "number_of_replicas"] + - current_file: "logging.yml" + new_file: "{{ tempdir }}/elasticsearch-logging.yml" - name: Set ES configmap oc_configmap: diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml 
b/roles/openshift_logging_elasticsearch/vars/main.yml index c8e995146..0e56a6eac 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,6 +1,6 @@ --- -__latest_es_version: "3_6" -__allowed_es_versions: ["3_5", "3_6", "3_7"] +__latest_es_version: "3_8" +__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml index 96b181d61..31780a343 100644 --- a/roles/openshift_logging_eventrouter/tasks/main.yaml +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -1,8 +1,8 @@ --- -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 87eedfb4b..486cfb8bc 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -34,10 +34,10 @@ msg: WARNING Use of openshift_logging_mux_client_mode=minimal is not recommended due to current scaling issues when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode == 'minimal' -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -108,38 +108,28 @@ dest: "{{ tempdir }}/fluent.conf" vars: deploy_type: "{{ openshift_logging_fluentd_deployment_type }}" - when: fluentd_config_contents is undefined - changed_when: no - copy: src: fluentd-throttle-config.yaml dest: "{{ tempdir }}/fluentd-throttle-config.yaml" - when: fluentd_throttle_contents is undefined - changed_when: no - copy: src: secure-forward.conf dest: "{{ tempdir }}/secure-forward.conf" - when: fluentd_secureforward_contents is undefined - changed_when: no - -- copy: - content: "{{ fluentd_config_contents }}" - dest: "{{ tempdir }}/fluent.conf" - when: fluentd_config_contents is defined - changed_when: no -- copy: - content: "{{ fluentd_throttle_contents }}" - dest: "{{ tempdir }}/fluentd-throttle-config.yaml" - when: fluentd_throttle_contents is defined - changed_when: no - -- copy: - content: "{{ fluentd_secureforward_contents }}" - dest: "{{ tempdir }}/secure-forward.conf" - when: fluentd_secureforward_contents is defined - changed_when: no +- include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-fluentd" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "fluent.conf" + new_file: "{{ tempdir }}/fluent.conf" + - current_file: "throttle-config.yaml" + new_file: "{{ tempdir }}/fluentd-throttle-config.yaml" + - current_file: "secure-forward.conf" + new_file: "{{ tempdir }}/secure-forward.conf" - name: Set Fluentd configmap oc_configmap: diff --git 
a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index 92a426952..762e3d4d0 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@ --- -__latest_fluentd_version: "3_6" -__allowed_fluentd_versions: ["3_5", "3_6", "3_7"] +__latest_fluentd_version: "3_8" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8"] __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] __allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index a00248d11..3c3bd902e 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -1,9 +1,9 @@ --- # fail is we don't have an endpoint for ES to connect to? -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index 241877a02..a2c54d8e4 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_kibana_version: "3_6" -__allowed_kibana_versions: ["3_5", "3_6", "3_7"] +__latest_kibana_version: "3_8" +__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8"] diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 68948bce2..a281c6a53 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -7,10 +7,10 @@ msg: Operations logs destination is required when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == '' -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ var_file_name }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" loop_control: loop_var: var_file_name @@ -88,26 +88,24 @@ - copy: src: fluent.conf dest: "{{mktemp.stdout}}/fluent-mux.conf" - when: fluentd_mux_config_contents is undefined changed_when: no - copy: src: secure-forward.conf dest: "{{mktemp.stdout}}/secure-forward-mux.conf" - when: fluentd_mux_securefoward_contents is undefined changed_when: no -- copy: - content: "{{fluentd_mux_config_contents}}" - dest: "{{mktemp.stdout}}/fluent-mux.conf" - when: fluentd_mux_config_contents is defined - changed_when: no - -- copy: - content: "{{fluentd_mux_secureforward_contents}}" - dest: "{{mktemp.stdout}}/secure-forward-mux.conf" - when: fluentd_mux_secureforward_contents is defined - changed_when: no +- include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-mux" + configmap_namespace: "{{ openshift_logging_mux_namespace }}" + configmap_file_names: + - current_file: "fluent.conf" + new_file: "{{ tempdir }}/fluent-mux.conf" + - current_file: "secure-forward.conf" + new_file: "{{ tempdir }}/secure-forward-mux.conf" - name: Set Mux configmap oc_configmap: diff --git a/roles/openshift_logging_mux/vars/main.yml 
b/roles/openshift_logging_mux/vars/main.yml index e7b57f4b5..1da053b4a 100644 --- a/roles/openshift_logging_mux/vars/main.yml +++ b/roles/openshift_logging_mux/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_mux_version: "3_6" -__allowed_mux_versions: ["3_5", "3_6", "3_7"] +__latest_mux_version: "3_8" +__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8"] diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 9f315b9af..9251d380b 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -18,7 +18,7 @@ retries: 120 delay: 1 changed_when: false - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool delegate_to: "{{ openshift_master_host }}" run_once: true diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 5d292ffd0..7d96a467e 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -53,12 +53,12 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False -openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}" +openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}" containerized_svc_dir: "/usr/lib/systemd/system" ha_svc_template_path: "native-cluster" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig" loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}" @@ -82,6 +82,15 @@ openshift_master_valid_grant_methods: openshift_master_is_scaleup_host: False +# openshift_master_oauth_template is deprecated. It should be added to the deprecations +# list and removed. +openshift_master_oauth_template: False +openshift_master_oauth_templates_default: + login: "{{ openshift_master_oauth_template }}" +openshift_master_oauth_templates: "{{ openshift_master_oauth_template | ternary(openshift_master_oauth_templates_default, False) }}" +# Here we combine openshift_master_oauth_template into the 'login' key of openshift_master_oauth_templates, if the latter is not already set.
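These defaults keep the deprecated single-template variable working while the newer dict form takes precedence, and the l_openshift_master_oauth_templates line that follows completes the chain consumed by master.yaml.v1.j2. A small Python stand-in for that Jinja ternary/default logic (the function name and sample paths are illustrative only):

# Sketch of the oauth template precedence: explicit templates dict > deprecated single template > nothing.
def resolve_oauth_templates(oauth_template=False, oauth_templates=None):
    # the deprecated single-template variable becomes a dict under the 'login' key
    templates_default = {"login": oauth_template} if oauth_template else False
    # an explicitly supplied openshift_master_oauth_templates wins outright
    return oauth_templates if oauth_templates is not None else templates_default

assert resolve_oauth_templates() is False
assert resolve_oauth_templates(oauth_template="/etc/origin/master/login.html") == \
    {"login": "/etc/origin/master/login.html"}
assert resolve_oauth_templates(oauth_templates={"login": "/custom/login.html"}) == \
    {"login": "/custom/login.html"}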
+l_openshift_master_oauth_templates: "{{ openshift_master_oauth_templates | default(openshift_master_oauth_templates_default) }}" + # These defaults assume forcing journald persistence, fsync to disk once # a second, rate-limiting to 10,000 logs a second, no forwarding to # syslog or wall, using 8GB of disk space maximum, using 10MB journal diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 1c43d335f..eea1401b8 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -19,7 +19,7 @@ name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}" state: present when: - - not openshift.common.is_containerized | bool + - not openshift_is_containerized | bool register: result until: result is succeeded @@ -31,12 +31,12 @@ owner: root group: root when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - name: Reload systemd units command: systemctl daemon-reload when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - name: Re-gather package dependent master facts openshift_facts: @@ -48,7 +48,7 @@ - name: Create the policy file if it does not already exist command: > - {{ openshift.common.client_binary }} adm create-bootstrap-policy-file + {{ openshift_client_binary }} adm create-bootstrap-policy-file --filename={{ openshift_master_policy }} args: creates: "{{ openshift_master_policy }}" @@ -69,7 +69,7 @@ package: name=httpd-tools state=present when: - item.kind == 'HTPasswdPasswordIdentityProvider' - - not openshift.common.is_atomic | bool + - not openshift_is_atomic | bool with_items: "{{ openshift.master.identity_providers }}" register: result until: result is succeeded @@ -164,7 +164,7 @@ - name: Install Master system container include_tasks: system_container.yml when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - l_is_master_system_container | bool - name: Create session secrets file diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index 8b342a5b4..911a9bd3d 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -43,7 +43,7 @@ set_fact: l_bind_docker_reg_auth: True when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - oreg_auth_user is defined - > (master_oreg_auth_credentials_stat.stat.exists diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml index 487fefb63..7e013a699 100644 --- a/roles/openshift_master/tasks/set_loopback_context.yml +++ b/roles/openshift_master/tasks/set_loopback_context.yml @@ -1,13 +1,13 @@ --- - name: Test local loopback context command: > - {{ openshift.common.client_binary }} config view + {{ openshift_client_binary }} config view --config={{ openshift_master_loopback_config }} changed_when: false register: l_loopback_config - command: > - {{ openshift.common.client_binary }} config set-cluster + {{ openshift_client_binary }} config set-cluster --certificate-authority={{ openshift_master_config_dir }}/ca.crt --embed-certs=true --server={{ openshift.master.loopback_api_url }} {{ openshift.master.loopback_cluster_name }} @@ -17,7 +17,7 @@ register: set_loopback_cluster - command: > - {{ openshift.common.client_binary }} config set-context + {{ openshift_client_binary }} 
config set-context --cluster={{ openshift.master.loopback_cluster_name }} --namespace=default --user={{ openshift.master.loopback_user }} {{ openshift.master.loopback_context_name }} @@ -27,7 +27,7 @@ register: l_set_loopback_context - command: > - {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }} + {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name }} --config={{ openshift_master_loopback_config }} when: - l_set_loopback_context is changed diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 906ecf277..870ab7c57 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -7,7 +7,7 @@ containerized_svc_dir: "/etc/systemd/system" ha_svc_template_path: "docker-cluster" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - include_tasks: registry_auth.yml @@ -34,7 +34,7 @@ register: l_pull_result changed_when: "'Downloaded newer image' in l_pull_result.stdout" when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - not l_is_master_system_container | bool - name: Create the ha systemd unit files diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml index f84cf2f6e..f143673cf 100644 --- a/roles/openshift_master/tasks/upgrade.yml +++ b/roles/openshift_master/tasks/upgrade.yml @@ -1,6 +1,6 @@ --- - include_tasks: upgrade/rpm_upgrade.yml - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool - include_tasks: upgrade/upgrade_scheduler.yml diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index 3f7a528a9..4c68155ea 100644 --- a/roles/openshift_master/templates/atomic-openshift-master.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }} {% elif openshift_push_via_dns | default(false) %} OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 {% endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index c224ad714..14023ea73 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -152,8 +152,8 @@ oauthConfig: {% if 'oauth_always_show_provider_selection' in openshift.master %} alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }} {% endif %} -{% if 'oauth_templates' in openshift.master %} - templates:{{ openshift.master.oauth_templates | lib_utils_to_padded_yaml(level=2) }} +{% if l_openshift_master_oauth_templates %} + templates:{{ l_openshift_master_oauth_templates | lib_utils_to_padded_yaml(level=2) }} {% endif %} assetPublicURL: {{ openshift.master.public_console_url }}/ grantConfig: diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 index cc21b37af..bff32b2e3 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ 
-6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }} {% elif openshift_push_via_dns | default(false) %} OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 {% endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 index 493fc510e..b8a519baa 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 @@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }} {% elif openshift_push_via_dns | default(false) %} OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 {% endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 273414f8d..00cabe574 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -47,7 +47,7 @@ - name: Create the master server certificate command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} @@ -71,7 +71,7 @@ - name: Generate the loopback master client config command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config --certificate-authority={{ openshift_ca_cert }} {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index ad9a21c96..85d0ac25c 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -74,8 +74,6 @@ master_count: "{{ openshift_master_count | default(None) }}" admission_plugin_config: "{{openshift_master_admission_plugin_config }}" kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config - oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2 - oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}" oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}" image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}" dynamic_provisioning_enabled: "{{ openshift_master_dynamic_provisioning_enabled | default(None) }}" diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml index bb842d710..b71e35263 100644 --- a/roles/openshift_metrics/tasks/generate_certificates.yaml +++ 
b/roles/openshift_metrics/tasks/generate_certificates.yaml @@ -1,7 +1,7 @@ --- - name: generate ca certificate chain command: > - {{ openshift.common.client_binary }} adm ca create-signer-cert + {{ openshift_client_binary }} adm ca create-signer-cert --config={{ mktemp.stdout }}/admin.kubeconfig --key='{{ mktemp.stdout }}/ca.key' --cert='{{ mktemp.stdout }}/ca.crt' diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 48584bd64..9026cc897 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -1,6 +1,6 @@ --- - shell: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} + {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc hawkular-cassandra-{{node}} -o jsonpath='{.spec.replicas}' || echo 0 vars: diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml index a4ffa1890..f45e7a042 100644 --- a/roles/openshift_metrics/tasks/install_hawkular.yaml +++ b/roles/openshift_metrics/tasks/install_hawkular.yaml @@ -1,6 +1,6 @@ --- - command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} + {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc hawkular-metrics -o jsonpath='{.spec.replicas}' register: hawkular_metrics_replica_count diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml index a33b28ba7..73e7454f0 100644 --- a/roles/openshift_metrics/tasks/install_heapster.yaml +++ b/roles/openshift_metrics/tasks/install_heapster.yaml @@ -1,6 +1,6 @@ --- - command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} + {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc heapster -o jsonpath='{.spec.replicas}' register: heapster_replica_count diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 49d1d8cf1..106909941 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -70,7 +70,7 @@ - include_tasks: update_master_config.yaml - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 9dfe360bb..b67077bca 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -9,10 +9,10 @@ - "'not installed' not in passlib_result.stdout" msg: "python-passlib rpm must be installed on control host" -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: Set metrics image facts diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml index 1e1af40e8..8ccfb7192 100644 --- a/roles/openshift_metrics/tasks/oc_apply.yaml +++ b/roles/openshift_metrics/tasks/oc_apply.yaml @@ -1,7 
+1,7 @@ --- - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} command: > - {{ openshift.common.client_binary }} + {{ openshift_client_binary }} --config={{ kubeconfig }} get {{file_content.kind}} {{file_content.metadata.name}} -o jsonpath='{.metadata.resourceVersion}' @@ -12,7 +12,7 @@ - name: Applying {{file_name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} apply -f {{ file_name }} -n {{namespace}} register: generation_apply @@ -21,7 +21,7 @@ - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} get {{file_content.kind}} {{file_content.metadata.name}} -o jsonpath='{.metadata.resourceVersion}' -n {{namespace}} diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml index d6756f9b9..976763236 100644 --- a/roles/openshift_metrics/tasks/pre_install.yaml +++ b/roles/openshift_metrics/tasks/pre_install.yaml @@ -14,7 +14,7 @@ - name: list existing secrets command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} + {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig get secrets -o name register: metrics_secrets diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml index e6081c0d3..223bd975e 100644 --- a/roles/openshift_metrics/tasks/setup_certificate.yaml +++ b/roles/openshift_metrics/tasks/setup_certificate.yaml @@ -1,7 +1,7 @@ --- - name: generate {{ component }} keys command: > - {{ openshift.common.client_binary }} adm ca create-server-cert + {{ openshift_client_binary }} adm ca create-server-cert --config={{ mktemp.stdout }}/admin.kubeconfig --key='{{ mktemp.stdout }}/{{ component }}.key' --cert='{{ mktemp.stdout }}/{{ component }}.crt' diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml index 2037e8dc3..899251727 100644 --- a/roles/openshift_metrics/tasks/start_metrics.yaml +++ b/roles/openshift_metrics/tasks/start_metrics.yaml @@ -1,6 +1,6 @@ --- - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=hawkular-cassandra @@ -23,7 +23,7 @@ changed_when: metrics_cassandra_rc | length > 0 - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=hawkular-metrics @@ -45,7 +45,7 @@ changed_when: metrics_metrics_rc | length > 0 - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=heapster diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml index 9a2ce9267..4b1d7119d 100644 --- a/roles/openshift_metrics/tasks/stop_metrics.yaml +++ b/roles/openshift_metrics/tasks/stop_metrics.yaml @@ -1,6 +1,6 @@ --- - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l metrics-infra=heapster @@ -22,7 +22,7 @@ loop_var: object - command: > - {{openshift.common.client_binary}} + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -l 
metrics-infra=hawkular-metrics @@ -44,7 +44,7 @@ changed_when: metrics_hawkular_rc | length > 0 - command: > - {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig + {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get rc -o name -l metrics-infra=hawkular-cassandra diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml index 42ed02460..ae3306496 100644 --- a/roles/openshift_metrics/tasks/uninstall_hosa.yaml +++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml @@ -1,7 +1,7 @@ --- - name: remove Hawkular Agent (HOSA) components command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found --selector=metrics-infra=agent all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings register: delete_metrics @@ -9,7 +9,7 @@ - name: remove rolebindings command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found clusterrolebinding/hawkular-openshift-agent-rb changed_when: delete_metrics.stdout != 'No resources found' diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 1265c7bfd..0ab0eec4b 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -4,7 +4,7 @@ - name: remove metrics components command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found --selector=metrics-infra all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole register: delete_metrics @@ -12,7 +12,7 @@ - name: remove rolebindings command: > - {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found rolebinding/hawkular-view clusterrolebinding/heapster-cluster-reader diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index a90aad532..27fe2f5c0 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -34,19 +34,19 @@ openshift_node_kubelet_args_dict: cloud-provider: - aws cloud-config: - - "{{ openshift_config_base ~ '/aws.conf' }}" + - "{{ openshift_config_base ~ '/cloudprovider/aws.conf' }}" node-labels: "{{ l_node_kubelet_node_labels }}" openstack: cloud-provider: - openstack cloud-config: - - "{{ openshift_config_base ~ '/openstack.conf' }}" + - "{{ openshift_config_base ~ '/cloudprovider/openstack.conf' }}" node-labels: "{{ l_node_kubelet_node_labels }}" gce: cloud-provider: - gce cloud-config: - - "{{ openshift_config_base ~ '/gce.conf' }}" + - "{{ openshift_config_base ~ '/cloudprovider/gce.conf' }}" node-labels: "{{ l_node_kubelet_node_labels }}" undefined: node-labels: "{{ 
l_node_kubelet_node_labels }}" @@ -169,9 +169,9 @@ oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False openshift_use_crio: False -openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}" +openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}" -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" # NOTE # r_openshift_node_*_default may be defined external to this role. diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 62e0e1341..779916335 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -34,7 +34,7 @@ pause: seconds=15 when: - (not skip_node_svc_handlers | default(False) | bool) - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - name: restart node systemd: diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 8f38a47aa..1103fe4c9 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -4,7 +4,7 @@ - name: Pull container images include_tasks: container_images.yml - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - name: Start and enable openvswitch service systemd: @@ -13,7 +13,7 @@ state: started daemon_reload: yes when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - openshift_node_use_openshift_sdn | default(true) | bool register: ovs_start_result until: not (ovs_start_result is failed) @@ -58,7 +58,7 @@ # restarted after the node restarts docker and it will take up to 60 seconds for # systemd to start the master again - when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - not openshift_node_bootstrap block: - name: Wait for master API to become available before proceeding diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml index 0c8857b11..5e06ba032 100644 --- a/roles/openshift_node/tasks/dnsmasq_install.yml +++ b/roles/openshift_node/tasks/dnsmasq_install.yml @@ -12,7 +12,7 @@ - name: Install dnsmasq package: name=dnsmasq state=installed - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index fb98b7550..55738d759 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -1,5 +1,5 @@ --- -- when: not openshift.common.is_containerized | bool +- when: not openshift_is_containerized | bool block: - name: Install Node package package: @@ -25,7 +25,7 @@ until: result is succeeded - when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - not l_is_node_system_container | bool block: - name: Pre-pull node image when containerized diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 2daa6c75f..103572291 100644 --- 
a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -3,7 +3,7 @@ msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." when: - (not ansible_selinux or ansible_selinux.status != 'enabled') - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - not openshift_use_crio - include_tasks: dnsmasq_install.yml @@ -99,7 +99,3 @@ - include_tasks: config/workaround-bz1331590-ovs-oom-fix.yml when: openshift_node_use_openshift_sdn | default(true) | bool - -- name: include bootstrap node config - include_tasks: bootstrap.yml - when: openshift_node_bootstrap diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index ab43ec049..92650e6b7 100644 --- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -41,7 +41,7 @@ set_fact: l_bind_docker_reg_auth: True when: - - openshift.common.is_containerized | bool + - openshift_is_containerized | bool - oreg_auth_user is defined - > (node_oreg_auth_credentials_stat.stat.exists diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml index 52d80357e..e30f58a9a 100644 --- a/roles/openshift_node/tasks/storage_plugins/ceph.yml +++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml @@ -1,6 +1,6 @@ --- - name: Install Ceph storage plugin dependencies package: name=ceph-common state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml index e60f57ae7..c04a6922a 100644 --- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml @@ -1,7 +1,7 @@ --- - name: Install GlusterFS storage plugin dependencies package: name=glusterfs-fuse state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml index d3a3668d5..a8048c42f 100644 --- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml +++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml @@ -1,6 +1,6 @@ --- - name: Install iSCSI storage plugin dependencies package: name=iscsi-initiator-utils state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml index 1484aa076..c2922644f 100644 --- a/roles/openshift_node/tasks/storage_plugins/nfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml @@ -1,7 +1,7 @@ --- - name: Install NFS storage plugin dependencies package: name=nfs-utils state=present - when: not openshift.common.is_atomic | bool + when: not openshift_is_atomic | bool register: result until: result is succeeded diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 262ee698b..e33a4999f 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -2,13 +2,13 @@ - name: Install Node service file template: dest: "/etc/systemd/system/{{ openshift_service_type 
}}-node.service" - src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" + src: "{{ openshift_is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" when: not l_is_node_system_container | bool notify: - reload systemd units - restart node -- when: openshift.common.is_containerized | bool +- when: openshift_is_containerized | bool block: - name: include node deps docker service file include_tasks: config/install-node-deps-docker-service-file.yml diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml index f0a013e45..02e417937 100644 --- a/roles/openshift_node/tasks/upgrade.yml +++ b/roles/openshift_node/tasks/upgrade.yml @@ -1,11 +1,10 @@ --- # input variables: # - l_docker_upgrade -# - openshift.common.is_atomic +# - openshift_is_atomic # - node_config_hook # - openshift_pkg_version -# - openshift.common.is_containerized -# - deployment_type +# - openshift_is_containerized # - openshift_release # tasks file for openshift_node_upgrade @@ -26,7 +25,7 @@ include_tasks: upgrade/rpm_upgrade_install.yml vars: openshift_version: "{{ openshift_pkg_version | default('') }}" - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool - include_tasks: "{{ node_config_hook }}" diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml index 439700df6..50044eb3e 100644 --- a/roles/openshift_node/tasks/upgrade/config_changes.yml +++ b/roles/openshift_node/tasks/upgrade/config_changes.yml @@ -1,7 +1,7 @@ --- - name: Update systemd units include_tasks: ../systemd_units.yml - when: openshift.common.is_containerized + when: openshift_is_containerized - name: Update oreg value yedit: diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml index 45b0be0a0..bd6f42182 100644 --- a/roles/openshift_node/tasks/upgrade/restart.yml +++ b/roles/openshift_node/tasks/upgrade/restart.yml @@ -1,7 +1,7 @@ --- # input variables: # - openshift_service_type -# - openshift.common.is_containerized +# - openshift_is_containerized # - openshift.common.hostname # - openshift.master.api_port diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml index cc9a8f2d9..91a358095 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml @@ -3,7 +3,7 @@ # - openshift_service_type # - component # - openshift_pkg_version -# - openshift.common.is_atomic +# - openshift_is_atomic # Pre-pull new node rpm, but don't install - name: download new node packages diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml index 32eeb76c6..c9094e05a 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml @@ -3,7 +3,7 @@ # - openshift_service_type # - component # - openshift_pkg_version -# - openshift.common.is_atomic +# - openshift_is_atomic # Install the pre-pulled RPM # Note: dnsmasq is covered in it's own play. 
openvswitch is included here diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml index 2fff556e5..6d92516c3 100644 --- a/roles/openshift_node/tasks/upgrade/stop_services.yml +++ b/roles/openshift_node/tasks/upgrade/stop_services.yml @@ -19,7 +19,7 @@ - "{{ openshift_service_type }}-master-controllers" - "{{ openshift_service_type }}-node" failed_when: false - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool - service: name: docker @@ -40,4 +40,4 @@ - "{{ openshift_service_type }}-node" - openvswitch failed_when: false - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml index 7f591996c..3ae7dc6b6 100644 --- a/roles/openshift_node/tasks/upgrade_pre.yml +++ b/roles/openshift_node/tasks/upgrade_pre.yml @@ -11,7 +11,7 @@ command: "{{ ansible_pkg_mgr }} makecache" register: result until: result is succeeded - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool - name: Check Docker image count shell: "docker images -aq | wc -l" @@ -26,7 +26,7 @@ - l_docker_upgrade | bool - include_tasks: upgrade/containerized_upgrade_pull.yml - when: openshift.common.is_containerized | bool + when: openshift_is_containerized | bool # Prepull the rpms for docker upgrade, but don't install - name: download docker upgrade rpm @@ -40,7 +40,7 @@ - include_tasks: upgrade/rpm_upgrade.yml vars: openshift_version: "{{ openshift_pkg_version | default('') }}" - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool # https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory - name: Check for swap usage diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index 8b43beb07..873744f34 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -6,6 +6,12 @@ Before={{ openshift_service_type }}-node.service {% if openshift_use_crio %}Wants=cri-o.service{% endif %} [Service] -ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi" +ExecStart=/bin/bash -c 'if [[ -f /usr/bin/docker-current ]]; \ + then echo DOCKER_ADDTL_BIND_MOUNTS=\"--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro \ + --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro \ + --volume=/etc/containers/registries:/etc/containers/registries:ro \ + {% if l_bind_docker_reg_auth %} --volume={{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\" > \ + /etc/sysconfig/{{ openshift_service_type }}-node-dep; \ + else echo "#DOCKER_ADDTL_BIND_MOUNTS=" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi' ExecStop= SyslogIdentifier={{ openshift_service_type }}-node-dep diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml index b42b75be9..da1570528 
100644 --- a/roles/openshift_node_certificates/defaults/main.yml +++ b/roles/openshift_node_certificates/defaults/main.yml @@ -2,4 +2,4 @@ openshift_node_cert_expire_days: 730 openshift_ca_host: '' -openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}" diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 1e5ebe98e..e95e38fdf 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -51,7 +51,7 @@ - name: Generate the node client config command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} @@ -77,7 +77,7 @@ - name: Generate the node server certificate command: > - {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert + {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key --expire-days={{ openshift_node_cert_expire_days }} diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml index 346605ff7..ef9ab7f5f 100644 --- a/roles/openshift_persistent_volumes/tasks/pv.yml +++ b/roles/openshift_persistent_volumes/tasks/pv.yml @@ -8,7 +8,7 @@ - name: Create PersistentVolumes command: > - {{ openshift.common.client_binary }} create + {{ openshift_client_binary }} create -f {{ mktemp.stdout }}/persistent-volumes.yml --config={{ mktemp.stdout }}/admin.kubeconfig register: pv_create_output diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml index e44f9b18f..2c5519192 100644 --- a/roles/openshift_persistent_volumes/tasks/pvc.yml +++ b/roles/openshift_persistent_volumes/tasks/pvc.yml @@ -8,7 +8,7 @@ - name: Create PersistentVolumeClaims command: > - {{ openshift.common.client_binary }} create + {{ openshift_client_binary }} create -f {{ mktemp.stdout }}/persistent-volume-claims.yml --config={{ mktemp.stdout }}/admin.kubeconfig register: pvc_create_output diff --git a/roles/openshift_project_request_template/tasks/main.yml b/roles/openshift_project_request_template/tasks/main.yml index c31ee5795..3403840fb 100644 --- a/roles/openshift_project_request_template/tasks/main.yml +++ b/roles/openshift_project_request_template/tasks/main.yml @@ -6,7 +6,7 @@ - name: Generate default project template command: | - {{ openshift.common.client_binary | quote }} \ + {{ openshift_client_binary | quote }} \ --config {{ openshift.common.config_base | quote }}/master/admin.kubeconfig \ --output yaml \ adm create-bootstrap-project-template \ @@ -28,7 +28,7 @@ - name: Create or update project request template command: | - {{ openshift.common.client_binary }} \ + {{ openshift_client_binary }} \ --config {{ openshift.common.config_base 
}}/master/admin.kubeconfig \ --namespace {{ openshift_project_request_template_namespace | quote }} \ apply --filename {{ mktemp.stdout }} diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index 2fb1c08e5..749df5152 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -39,7 +39,7 @@ # TODO remove this when annotations are supported by oc_serviceaccount - name: annotate serviceaccount command: > - {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} + {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} serviceaccount prometheus serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' @@ -97,7 +97,7 @@ # TODO remove this when annotations are supported by oc_service - name: annotate prometheus service command: > - {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} + {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} service prometheus prometheus.io/scrape='true' prometheus.io/scheme=https @@ -105,7 +105,7 @@ - name: annotate alerts service command: > - {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} + {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls' # create prometheus and alerts routes diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index 38798e1f5..b859eb111 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -1,5 +1,5 @@ --- -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - "{{ openshift_deployment_type }}.yml" diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index e543d753c..de763f6cf 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -1,7 +1,7 @@ --- - name: Check efs current replica count command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs -o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}} register: efs_replica_count when: not ansible_check_mode @@ -58,7 +58,7 @@ # anyuid in order to run as root & chgrp shares with allocated gids - name: "Check efs anyuid permissions" command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc/anyuid -o jsonpath='{.users}' register: efs_anyuid check_mode: no @@ -66,7 +66,7 @@ - name: "Set anyuid permissions for efs" command: > - {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout 
}}/admin.kubeconfig policy + {{ openshift_client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs register: efs_output failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml index 49d03f203..a4ce53eae 100644 --- a/roles/openshift_provisioners/tasks/oc_apply.yaml +++ b/roles/openshift_provisioners/tasks/oc_apply.yaml @@ -1,7 +1,7 @@ --- - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} get {{file_content.kind}} {{file_content.metadata.name}} -o jsonpath='{.metadata.resourceVersion}' -n {{namespace}} @@ -11,7 +11,7 @@ - name: Applying {{file_name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} apply -f {{ file_name }} -n {{ namespace }} register: generation_apply @@ -20,7 +20,7 @@ - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} get {{file_content.kind}} {{file_content.metadata.name}} -o jsonpath='{.metadata.resourceVersion}' -n {{namespace}} @@ -32,7 +32,7 @@ - name: Removing previous {{file_name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} delete -f {{ file_name }} -n {{ namespace }} register: generation_delete @@ -42,7 +42,7 @@ - name: Recreating {{file_name}} command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} + {{ openshift_client_binary }} --config={{ kubeconfig }} apply -f {{ file_name }} -n {{ namespace }} register: generation_apply diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml index 602dee773..ac12087ec 100644 --- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml +++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml @@ -5,7 +5,7 @@ # delete the deployment objects that we had created - name: delete provisioner api objects command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - dc @@ -15,7 +15,7 @@ # delete our old secrets - name: delete provisioner secrets command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - provisioners-efs @@ -26,7 +26,7 @@ # delete cluster role bindings - name: delete cluster role bindings command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true with_items: - run-provisioners-efs diff --git 
a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 35206049f..911005bb6 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -40,7 +40,7 @@ - include_tasks: rhel_repos.yml when: - ansible_distribution == 'RedHat' - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - rhsub_user is defined - rhsub_pass is defined diff --git a/roles/openshift_sanitize_inventory/meta/main.yml b/roles/openshift_sanitize_inventory/meta/main.yml index 324ba06d8..cde3eccb6 100644 --- a/roles/openshift_sanitize_inventory/meta/main.yml +++ b/roles/openshift_sanitize_inventory/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info: - system dependencies: - role: lib_utils +- role: lib_openshift diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 651d896cf..62d460272 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -3,37 +3,11 @@ # the user would also be aware of any deprecated variables they should note to adjust - include_tasks: deprecations.yml -- name: Abort when conflicting deployment type variables are set - when: - - deployment_type is defined - - openshift_deployment_type is defined - - openshift_deployment_type != deployment_type - fail: - msg: |- - openshift_deployment_type is set to "{{ openshift_deployment_type }}". - deployment_type is set to "{{ deployment_type }}". - To avoid unexpected results, this conflict is not allowed. - deployment_type is deprecated in favor of openshift_deployment_type. - Please specify only openshift_deployment_type, or make both the same. - - name: Standardize on latest variable names set_fact: - # goal is to deprecate deployment_type in favor of openshift_deployment_type. - # both will be accepted for now, but code should refer to the new name. - # TODO: once this is well-documented, add deprecation notice if using old name. - deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" - openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" -- name: Abort when deployment type is invalid - # this variable is required; complain early and clearly if it is invalid. - when: openshift_deployment_type not in known_openshift_deployment_types - fail: - msg: |- - Please set openshift_deployment_type to one of: - {{ known_openshift_deployment_types | join(', ') }} - - name: Normalize openshift_release set_fact: # Normalize release if provided, e.g. 
"v3.5" => "3.5" diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index 0fc2372d2..df15948d2 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -1,7 +1,4 @@ --- -# origin uses community packages named 'origin' -# openshift-enterprise uses Red Hat packages named 'atomic-openshift' -known_openshift_deployment_types: ['origin', 'openshift-enterprise'] __deprecation_header: "[DEPRECATION WARNING]:" diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index cd7bda2c6..e478023f8 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -12,7 +12,7 @@ - name: Generate signing cert command: > - {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert + {{ openshift_client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer @@ -60,7 +60,7 @@ register: apiserver_ca - shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" register: get_apiservices changed_when: no diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 4b842c166..cfecaa12c 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -6,10 +6,10 @@ register: mktemp changed_when: False -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: Set service_catalog image facts @@ -38,7 +38,7 @@ - name: Make kube-service-catalog project network global command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog - include_tasks: generate_certs.yml @@ -93,7 +93,7 @@ # only do this if we don't already have the updated role info - name: update edit role for service catalog and pod preset access command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml when: - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 
'delete', 'get', 'list', 'watch']) @@ -116,7 +116,7 @@ # only do this if we don't already have the updated role info - name: update admin role for service catalog and pod preset access command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml when: - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) @@ -139,7 +139,7 @@ # only do this if we don't already have the updated role info - name: update view role for service catalog access command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml when: - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml index a832e1f85..aa32d0513 100644 --- a/roles/openshift_service_catalog/tasks/remove.yml +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -1,7 +1,7 @@ --- - name: Remove Service Catalog APIServer command: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog # TODO: this module doesn't currently remove this #- name: Remove service catalog api service @@ -48,7 +48,7 @@ - name: Remove Service Catalog kube-system Role Bindinds shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template @@ -58,7 +58,7 @@ - name: Remove Service Catalog kube-service-catalog Role Bindinds shell: > - {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - + {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml 
b/roles/openshift_storage_glusterfs/defaults/main.yml index da34fab2a..4cbe262d2 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -6,16 +6,16 @@ openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_gluste openshift_storage_glusterfs_use_default_selector: False openshift_storage_glusterfs_storageclass: True openshift_storage_glusterfs_storageclass_default: False -openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" +openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' openshift_storage_glusterfs_block_deploy: True -openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}" +openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}" openshift_storage_glusterfs_block_version: 'latest' openshift_storage_glusterfs_block_host_vol_create: True openshift_storage_glusterfs_block_host_vol_size: 100 openshift_storage_glusterfs_block_host_vol_max: 15 openshift_storage_glusterfs_s3_deploy: True -openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}" +openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}" openshift_storage_glusterfs_s3_version: 'latest' openshift_storage_glusterfs_s3_account: "{{ omit }}" openshift_storage_glusterfs_s3_user: "{{ omit }}" @@ -29,7 +29,7 @@ openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is openshift_storage_glusterfs_heketi_is_missing: True openshift_storage_glusterfs_heketi_deploy_is_missing: True openshift_storage_glusterfs_heketi_cli: 'heketi-cli' -openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}" +openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}" openshift_storage_glusterfs_heketi_version: 'latest' openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}" openshift_storage_glusterfs_heketi_user_key: "{{ omit }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index 9307cb957..001578406 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -2,7 +2,7 @@ - name: Make sure heketi-client is installed package: name=heketi-client state=present when: - - not openshift.common.is_atomic | bool + - not openshift_is_atomic | bool - not glusterfs_heketi_is_native | bool register: result until: result is succeeded @@ -238,14 +238,14 @@ - name: Set heketi-cli command set_fact: - glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh 
--namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" + glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" - name: Verify heketi service command: "{{ glusterfs_heketi_client }} cluster list" changed_when: False - name: Place heketi topology on heketi Pod - shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json" + shell: "{{ openshift_client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json" when: - glusterfs_heketi_is_native diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 60b9ca497..c0a8c53de 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -4,7 +4,7 @@ register: setup_storage - name: Copy heketi-storage list - shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" + shell: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" # This is used in the subsequent task - name: Copy the admin client config @@ -15,7 +15,7 @@ # Need `command` here because heketi-storage.json contains multiple objects. 
- name: Copy heketi DB to GlusterFS volume - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" + command: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" when: setup_storage.rc == 0 - name: Wait for copy job to finish @@ -126,7 +126,7 @@ - name: Set heketi-cli command set_fact: - glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'" + glusterfs_heketi_client: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'" - name: Verify heketi service command: "{{ glusterfs_heketi_client }} cluster list" diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml index c8e7b6d7c..ff92e59e5 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/main.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml @@ -2,7 +2,7 @@ # TODO -- this may actually work on atomic hosts - fail: msg: "openshift_storage_nfs_lvm is not compatible with atomic host" - when: openshift.common.is_atomic | bool + when: openshift_is_atomic | bool - name: Create lvm volumes lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml index 94dc63bd2..9a72adbdc 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml @@ -1,7 +1,7 @@ --- - name: Install NFS server package: name=nfs-utils state=present - when: not openshift.common.is_containerized | bool + when: not openshift_is_containerized | bool register: result until: result is succeeded diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index e50d5371e..97e58ffac 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -1,10 +1,6 @@ --- # Determine the openshift_version to configure if none has been specified or set previously. -- set_fact: - is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}" - is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}" - # Block attempts to install origin without specifying some kind of version information. # This is because the latest tags for origin are usually alpha builds, which should not # be used by default. Users must indicate what they want. @@ -16,7 +12,7 @@ component images to use. You may want the latest (usually alpha) releases or a more stable release. (Suggestion: add openshift_release="x.y" to inventory.) 
when: - - is_containerized | bool + - openshift_is_containerized | bool - openshift.common.deployment_type == 'origin' - openshift_release is not defined - openshift_image_tag is not defined @@ -94,11 +90,11 @@ block: - name: Set openshift_version for rpm installation include_tasks: set_version_rpm.yml - when: not is_containerized | bool + when: not openshift_is_containerized | bool - name: Set openshift_version for containerized installation include_tasks: set_version_containerized.yml - when: is_containerized | bool + when: openshift_is_containerized | bool - block: - name: Get available {{ openshift_service_type}} version @@ -121,8 +117,8 @@ - openshift_pkg_version is not defined - openshift_image_tag is not defined when: - - is_containerized | bool - - not is_atomic | bool + - openshift_is_containerized | bool + - not openshift_is_atomic | bool # Warn if the user has provided an openshift_image_tag but is not doing a containerized install # NOTE: This will need to be modified/removed for future container + rpm installations work. @@ -132,7 +128,7 @@ openshift_image_tag is used for containerized installs. If you are trying to specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node. when: - - not is_containerized | bool + - not openshift_is_containerized | bool - openshift_image_tag is defined # At this point we know openshift_version is set appropriately. Now we set @@ -182,14 +178,14 @@ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." name: Abort if openshift_pkg_version was not set when: - - not is_containerized | bool + - not openshift_is_containerized | bool - openshift_version == '0.0' # We can't map an openshift_release to full rpm version like we can with containers; make sure # the rpm version we looked up matches the release requested and error out if not. - name: For an RPM install, abort when the release requested does not match the available version. 
when: - - not is_containerized | bool + - not openshift_is_containerized | bool - openshift_release is defined assert: that: diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 1253c1133..765263db5 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -1,9 +1,9 @@ --- # Fact setting -- name: Set default image variables based on deployment type +- name: Set default image variables based on openshift_deployment_type include_vars: "{{ item }}" with_first_found: - - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "{{ openshift_deployment_type }}.yml" - "default_images.yml" - name: set template_service_broker facts @@ -44,16 +44,16 @@ - name: Apply template file shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" --param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }} - | {{ openshift.common.client_binary }} apply -f - + | {{ openshift_client_binary }} apply -f - # reconcile with rbac - name: Reconcile with RBAC file shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift.common.client_binary }} auth reconcile -f - + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f - # Check that the TSB is running - name: Verify that TSB is running @@ -80,7 +80,7 @@ # Register with broker - name: Register TSB with broker shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift.common.client_binary }} apply -f - + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f - - file: state: absent diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index 8b5593ff9..8b4d798db 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -13,11 +13,11 @@ - name: Delete TSB broker shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f - + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f - - name: Delete TSB objects shell: > - {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f - + {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f - - name: empty out tech preview extension file for service console UI copy: diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml index e95d274d5..4a28d47b2 100644 --- a/roles/tuned/tasks/main.yml +++ b/roles/tuned/tasks/main.yml @@ -11,7 +11,7 @@ block: - name: Set tuned 
OpenShift variables set_fact: - openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}" + openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift_is_atomic else 'virtual-guest' }}" - name: Ensure directory structure exists file: @@ -348,21 +348,10 @@ class OpenShiftAnsibleSyntaxCheck(Command): # --syntax-check each entry point playbook try: # Create a host group list to avoid WARNING on unmatched host patterns - host_group_list = [ - 'etcd,masters,nodes,OSEv3', - 'oo_all_hosts', - 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,' - 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate', - 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes', - 'oo_nodes_to_config,oo_nodes_to_upgrade', - 'oo_nodes_use_kuryr,oo_nodes_use_flannel', - 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv', - 'oo_lb_to_config', - 'oo_nfs_to_config', - 'glusterfs,glusterfs_registry,'] + tox_ansible_inv = os.environ['TOX_ANSIBLE_INV_PATH'] subprocess.check_output( - ['ansible-playbook', '-i ' + ','.join(host_group_list), - '--syntax-check', playbook] + ['ansible-playbook', '-i', tox_ansible_inv, + '--syntax-check', playbook, '-e', '@{}_extras'.format(tox_ansible_inv)] ) except subprocess.CalledProcessError as cpe: print('{}Execution failed: {}{}'.format( diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml index 006a71bd9..451ac0972 100644 --- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml @@ -4,7 +4,7 @@ vars: image: preflight-aos-package-checks l_host_vars: - deployment_type: openshift-enterprise + openshift_deployment_type: openshift-enterprise - name: Fail as required packages cannot be installed hosts: all diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml index b4f18e3b5..e37487f13 100644 --- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml @@ -3,7 +3,7 @@ vars: image: preflight-aos-package-checks l_host_vars: - deployment_type: origin + openshift_deployment_type: origin - name: Succeeds as Origin packages are public hosts: all diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml index 4e2b8a50c..9c845e1e5 100644 --- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml @@ -3,7 +3,7 @@ vars: image: preflight-aos-package-checks l_host_vars: - deployment_type: openshift-enterprise + openshift_deployment_type: openshift-enterprise openshift_release: 3.2 - name: Success when AOS version matches openshift_release diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml 
b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml index e1f8d74e6..9ae811939 100644 --- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml +++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml @@ -4,7 +4,7 @@ vars: image: preflight-aos-package-checks l_host_vars: - deployment_type: openshift-enterprise + openshift_deployment_type: openshift-enterprise openshift_release: 3.2 - name: Failure when AOS version doesn't match openshift_release diff --git a/test/tox-inventory.txt b/test/tox-inventory.txt new file mode 100644 index 000000000..6e57d224b --- /dev/null +++ b/test/tox-inventory.txt @@ -0,0 +1,105 @@ +[OSEv3] +localhost + + +[OSEv3:children] +etcd +masters +nodes +oo_all_hosts +oo_etcd_to_config +oo_new_etcd_to_config +oo_first_etcd +oo_etcd_hosts_to_backup +oo_etcd_hosts_to_upgrade +oo_etcd_to_migrate +oo_masters +oo_masters_to_config +oo_first_master +oo_containerized_master_nodes +oo_nodes_to_config +oo_nodes_to_upgrade +oo_nodes_use_kuryr +oo_nodes_use_flannel +oo_nodes_use_calico +oo_nodes_use_nuage +oo_nodes_use_contiv +oo_lb_to_config +oo_nfs_to_config +glusterfs +glusterfs_registry + +[etcd] +localhost + +[masters] +localhost + +[nodes] +localhost + +[oo_all_hosts] +localhost + +[oo_etcd_to_config] +localhost + +[oo_new_etcd_to_config] +localhost + +[oo_first_etcd] +localhost + +[oo_etcd_hosts_to_backup] +localhost + +[oo_etcd_hosts_to_upgrade] +localhost + +[oo_etcd_to_migrate] +localhost + +[oo_masters] +localhost + +[oo_masters_to_config] +localhost + +[oo_first_master] +localhost + +[oo_containerized_master_nodes] +localhost + +[oo_nodes_to_config] +localhost + +[oo_nodes_to_upgrade] +localhost + +[oo_nodes_use_kuryr] +localhost + +[oo_nodes_use_flannel] +localhost + +[oo_nodes_use_calico] +localhost + +[oo_nodes_use_nuage] +localhost + +[oo_nodes_use_contiv] +localhost + +[oo_lb_to_config] +localhost + +[oo_nfs_to_config] +localhost + +[glusterfs] +localhost + +[glusterfs_registry] +localhost diff --git a/test/tox-inventory.txt_extras b/test/tox-inventory.txt_extras new file mode 100644 index 000000000..f73610570 --- /dev/null +++ b/test/tox-inventory.txt_extras @@ -0,0 +1,3 @@ +--- +hostvars: + localhost: {} @@ -8,6 +8,7 @@ skip_missing_interpreters=True [testenv] skip_install=True +setenv = TOX_ANSIBLE_INV_PATH = {toxinidir}/test/tox-inventory.txt deps = -rrequirements.txt -rtest-requirements.txt |
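Taken together, the hunks above apply one consistent rename: the nested openshift.common.client_binary, openshift.common.is_atomic and openshift.common.is_containerized facts become the flat openshift_client_binary, openshift_is_atomic and openshift_is_containerized variables, and the deprecated deployment_type inventory variable is replaced by openshift_deployment_type. A minimal sketch of a task written against the new convention follows; the task name and the kubeconfig variable here are illustrative assumptions rather than lines taken from the patch, and the flat variables are assumed to be populated by openshift_facts / openshift_sanitize_inventory as this series arranges:

    ---
    # New-style task: flat variables instead of openshift.common.* facts
    - name: Example task using the flattened variables
      command: >
        {{ openshift_client_binary }}
        --config={{ kubeconfig }}
        get nodes -o name
      when:
        - not openshift_is_atomic | bool
        - openshift_deployment_type == 'origin'

The equivalent pre-patch task would have referenced {{ openshift.common.client_binary }} and gated on openshift.common.is_atomic | bool, which is why nearly every command: and when: clause in the hunks above changes in lockstep.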