diff options
Diffstat (limited to 'roles')
175 files changed, 1382 insertions, 460 deletions
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index ff90f59a3..c90bbbe9b 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -30,9 +30,9 @@      ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"      ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}" -- include: validate_facts.yml +- include_tasks: validate_facts.yml -- include: generate_certs.yml +- include_tasks: generate_certs.yml  # Deployment of ansible-service-broker starts here  - name: create openshift-ansible-service-broker project diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml index f5e06d163..4a3c15d01 100644 --- a/roles/ansible_service_broker/tasks/main.yml +++ b/roles/ansible_service_broker/tasks/main.yml @@ -1,8 +1,8 @@  ---  # do any asserts here -- include: install.yml +- include_tasks: install.yml    when: ansible_service_broker_install | bool -- include: remove.yml +- include_tasks: remove.yml    when: ansible_service_broker_remove | bool diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index 066ee3f3b..34754502a 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -1,7 +1,6 @@  ---  - name: setup firewall -  include: firewall.yml -  static: yes +  import_tasks: firewall.yml  - name: Install cockpit-ws    package: name={{ item }} state=present diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml index 40a0f9e61..cb9196a71 100644 --- a/roles/contiv/tasks/main.yml +++ b/roles/contiv/tasks/main.yml @@ -5,10 +5,10 @@      recurse: yes      state: directory -- include: download_bins.yml +- include_tasks: download_bins.yml -- include: netmaster.yml +- include_tasks: netmaster.yml    when: 
contiv_role == "netmaster" -- include: netplugin.yml +- include_tasks: netplugin.yml    when: contiv_role == "netplugin" diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml index cc52d3a43..6f15af8c2 100644 --- a/roles/contiv/tasks/netmaster.yml +++ b/roles/contiv/tasks/netmaster.yml @@ -1,8 +1,8 @@  --- -- include: netmaster_firewalld.yml +- include_tasks: netmaster_firewalld.yml    when: has_firewalld -- include: netmaster_iptables.yml +- include_tasks: netmaster_iptables.yml    when: not has_firewalld and has_iptables  - name: Netmaster | Check is /etc/hosts file exists @@ -70,8 +70,8 @@      state: started    register: netmaster_started -- include: aci.yml +- include_tasks: aci.yml    when: contiv_fabric_mode == "aci" -- include: default_network.yml +- include_tasks: default_network.yml    when: contiv_default_network == true diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml index e861a2591..0b2f91bab 100644 --- a/roles/contiv/tasks/netplugin.yml +++ b/roles/contiv/tasks/netplugin.yml @@ -1,8 +1,8 @@  --- -- include: netplugin_firewalld.yml +- include_tasks: netplugin_firewalld.yml    when: has_firewalld -- include: netplugin_iptables.yml +- include_tasks: netplugin_iptables.yml    when: has_iptables  - name: Netplugin | Ensure localhost entry correct in /etc/hosts @@ -19,7 +19,7 @@      line: '::1 '      state: absent -- include: ovs.yml +- include_tasks: ovs.yml    when: netplugin_driver == "ovs"  - name: Netplugin | Create Netplugin bin symlink diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml index 0c1b994c7..5c92e90e9 100644 --- a/roles/contiv/tasks/ovs.yml +++ b/roles/contiv/tasks/ovs.yml @@ -1,5 +1,5 @@  --- -- include: packageManagerInstall.yml +- include_tasks: packageManagerInstall.yml    when: source_type == "packageManager"    tags:      - binary-update diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml index 
e0d48e643..d5726476c 100644 --- a/roles/contiv/tasks/packageManagerInstall.yml +++ b/roles/contiv/tasks/packageManagerInstall.yml @@ -3,7 +3,7 @@    set_fact:      did_install: false -- include: pkgMgrInstallers/centos-install.yml +- include_tasks: pkgMgrInstallers/centos-install.yml    when: (ansible_os_family == "RedHat") and          not is_atomic diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml index 7a4972fca..3267a4ab0 100644 --- a/roles/contiv_facts/tasks/main.yml +++ b/roles/contiv_facts/tasks/main.yml @@ -81,8 +81,8 @@      has_iptables: false  # collect information about what packages are installed -- include: rpm.yml +- include_tasks: rpm.yml    when: has_rpm -- include: fedora-install.yml +- include_tasks: fedora-install.yml    when: not is_atomic and ansible_distribution == "Fedora" diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 69ee62790..b02a74711 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -2,7 +2,7 @@  # These tasks dispatch to the proper set of docker tasks based on the  # inventory:openshift_docker_use_system_container variable -- include: udev_workaround.yml +- include_tasks: udev_workaround.yml    when: docker_udev_workaround | default(False) | bool  - set_fact: @@ -20,7 +20,7 @@      - not l_use_crio_only  - name: Use Package Docker if Requested -  include: package_docker.yml +  include_tasks: package_docker.yml    when:      - not l_use_system_container      - not l_use_crio_only @@ -35,13 +35,13 @@    changed_when: false  - name: Use System Container Docker if Requested -  include: systemcontainer_docker.yml +  include_tasks: systemcontainer_docker.yml    when:      - l_use_system_container      - not l_use_crio_only  - name: Add CRI-O usage Requested -  include: systemcontainer_crio.yml +  include_tasks: systemcontainer_crio.yml    when:      - l_use_crio      - openshift_docker_is_node_or_master | bool @@ -60,10 +60,11 @@          state: 
stopped          name: "{{ openshift.docker.service_name }}" -    - name: "Ensure {{ docker_alt_storage_path }} exists" -      file: -        path: "{{ docker_alt_storage_path }}" -        state: directory +    - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}" +      command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" +      register: results +      failed_when: +        - results.rc != 0      - name: "Set the selinux context on {{ docker_alt_storage_path }}"        command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 8121163a6..5437275a2 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -1,6 +1,6 @@  ---  - name: Get current installed Docker version -  command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" +  command: "{{ repoquery_installed }} --qf '%{version}' docker"    when: not openshift.common.is_atomic | bool    register: curr_docker_version    retries: 4 @@ -33,9 +33,10 @@  # Make sure Docker is installed, but does not update a running version.  # Docker upgrades are handled by a separate playbook. +# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.  - name: Install Docker    package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''  - block:    # Extend the default Docker service unit file when using iptables-services @@ -157,4 +158,4 @@  - meta: flush_handlers  # This needs to run after docker is restarted to account for proxy settings. 
-- include: registry_auth.yml +- include_tasks: registry_auth.yml diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 3fe10454d..17800d4e5 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -3,16 +3,10 @@  # TODO: Much of this file is shared with container engine tasks  - set_fact:      l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" -  when: l2_docker_insecure_registries | bool  - set_fact:      l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" -  when: l2_docker_additional_registries | bool -- set_fact: -    l_crio_registries: "{{ ['docker.io'] }}" -  when: not (l2_docker_additional_registries | bool)  - set_fact:      l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" -  when: l2_docker_additional_registries | bool  - set_fact:      l_openshift_image_tag: "{{ openshift_image_tag | string }}" @@ -162,7 +156,7 @@      state: directory  - name: setup firewall for CRI-O -  include: crio_firewall.yml +  include_tasks: crio_firewall.yml    static: yes  - name: Configure the CNI network @@ -182,6 +176,6 @@  # If we are using crio only, docker.service might not be available for  # 'docker login' -- include: registry_auth.yml +- include_tasks: registry_auth.yml    vars:      openshift_docker_alternative_creds: "{{ l_use_crio_only }}" diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 84220fa66..f69acb9a5 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -177,6 +177,6 @@  # Since docker is running as a system container, docker login will fail to create  # credentials.  Use alternate method if requiring authenticated registries. 
-- include: registry_auth.yml +- include_tasks: registry_auth.yml    vars:      openshift_docker_alternative_creds: True diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 4b734d4ed..a069e4d87 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -2,10 +2,18 @@  r_etcd_common_backup_tag: ''  r_etcd_common_backup_sufix_name: '' +l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" +  # runc, docker, host -r_etcd_common_etcd_runtime: "docker" +r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"  r_etcd_common_embedded_etcd: false +osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd' +etcd_image_dict: +  origin: "registry.fedoraproject.org/f26/etcd" +  openshift-enterprise: "{{ osm_etcd_image }}" +etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}" +  # etcd run on a host => use etcdctl command directly  # etcd run as a docker container => use docker exec  # etcd run as a runc container => use runc exec diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml index 6daa6dc51..a29a90ea3 100644 --- a/roles/etcd/tasks/backup.archive.yml +++ b/roles/etcd/tasks/backup.archive.yml @@ -1,3 +1,3 @@  --- -- include: backup/vars.yml -- include: backup/archive.yml +- include_tasks: backup/vars.yml +- include_tasks: backup/archive.yml diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml index cc540cbca..6e8502e3b 100644 --- a/roles/etcd/tasks/backup.copy.yml +++ b/roles/etcd/tasks/backup.copy.yml @@ -1,3 +1,3 @@  --- -- include: backup/vars.yml -- include: backup/copy.yml +- include_tasks: backup/vars.yml +- include_tasks: backup/copy.yml diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml index 26ec15043..d33878804 100644 --- 
a/roles/etcd/tasks/backup.fetch.yml +++ b/roles/etcd/tasks/backup.fetch.yml @@ -1,3 +1,3 @@  --- -- include: backup/vars.yml -- include: backup/fetch.yml +- include_tasks: backup/vars.yml +- include_tasks: backup/fetch.yml diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml index d2e866416..7dd0899ee 100644 --- a/roles/etcd/tasks/backup.force_new_cluster.yml +++ b/roles/etcd/tasks/backup.force_new_cluster.yml @@ -1,5 +1,5 @@  --- -- include: backup/vars.yml +- include_tasks: backup/vars.yml  - name: Move content of etcd backup under the etcd data directory    command: > @@ -9,4 +9,4 @@    command: >      chown -R etcd:etcd "{{ etcd_data_dir }}" -- include: auxiliary/force_new_cluster.yml +- include_tasks: auxiliary/force_new_cluster.yml diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml index 77a637360..f92e87c3d 100644 --- a/roles/etcd/tasks/backup.unarchive.yml +++ b/roles/etcd/tasks/backup.unarchive.yml @@ -1,3 +1,3 @@  --- -- include: backup/vars.yml -- include: backup/unarchive.yml +- include_tasks: backup/vars.yml +- include_tasks: backup/unarchive.yml diff --git a/roles/etcd/tasks/backup.yml b/roles/etcd/tasks/backup.yml index c0538e596..60bb82100 100644 --- a/roles/etcd/tasks/backup.yml +++ b/roles/etcd/tasks/backup.yml @@ -1,2 +1,2 @@  --- -- include: backup/backup.yml +- include_tasks: backup/backup.yml diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml index ca0d29155..afb84eb58 100644 --- a/roles/etcd/tasks/backup/backup.yml +++ b/roles/etcd/tasks/backup/backup.yml @@ -1,5 +1,5 @@  --- -- include: vars.yml +- include_tasks: vars.yml  # TODO: replace shell module with command and update later checks  - name: Check available disk space for etcd backup diff --git a/roles/etcd/tasks/backup_ca_certificates.yml b/roles/etcd/tasks/backup_ca_certificates.yml index a41b032f3..c87359900 100644 --- 
a/roles/etcd/tasks/backup_ca_certificates.yml +++ b/roles/etcd/tasks/backup_ca_certificates.yml @@ -1,2 +1,2 @@  --- -- include: certificates/backup_ca_certificates.yml +- include_tasks: certificates/backup_ca_certificates.yml diff --git a/roles/etcd/tasks/backup_generated_certificates.yml b/roles/etcd/tasks/backup_generated_certificates.yml index 8cf2a10cc..fa73ea590 100644 --- a/roles/etcd/tasks/backup_generated_certificates.yml +++ b/roles/etcd/tasks/backup_generated_certificates.yml @@ -1,2 +1,2 @@  --- -- include: certificates/backup_generated_certificates.yml +- include_tasks: certificates/backup_generated_certificates.yml diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml index 129e1831c..5526825fa 100644 --- a/roles/etcd/tasks/backup_master_etcd_certificates.yml +++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml @@ -1,2 +1,2 @@  --- -- include: certificates/backup_master_etcd_certificates.yml +- include_tasks: certificates/backup_master_etcd_certificates.yml diff --git a/roles/etcd/tasks/backup_server_certificates.yml b/roles/etcd/tasks/backup_server_certificates.yml index 267ffeb4d..5f3052be1 100644 --- a/roles/etcd/tasks/backup_server_certificates.yml +++ b/roles/etcd/tasks/backup_server_certificates.yml @@ -1,2 +1,2 @@  --- -- include: certificates/backup_server_certificates.yml +- include_tasks: certificates/backup_server_certificates.yml diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml index cca1e9ad7..dd4b59e24 100644 --- a/roles/etcd/tasks/ca.yml +++ b/roles/etcd/tasks/ca.yml @@ -1,2 +1,2 @@  --- -- include: certificates/deploy_ca.yml +- include_tasks: certificates/deploy_ca.yml diff --git a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml index 75c110972..3410528eb 100644 --- a/roles/etcd/tasks/check_cluster_health.yml +++ b/roles/etcd/tasks/check_cluster_health.yml @@ -1,2 +1,2 @@  --- -- include: 
migration/check_cluster_health.yml +- include_tasks: migration/check_cluster_health.yml diff --git a/roles/etcd/tasks/clean_data.yml b/roles/etcd/tasks/clean_data.yml index d131ffd21..12538c2d0 100644 --- a/roles/etcd/tasks/clean_data.yml +++ b/roles/etcd/tasks/clean_data.yml @@ -1,2 +1,2 @@  --- -- include: auxiliary/clean_data.yml +- include_tasks: auxiliary/clean_data.yml diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml index 2f4108a0d..f3201816d 100644 --- a/roles/etcd/tasks/client_certificates.yml +++ b/roles/etcd/tasks/client_certificates.yml @@ -1,2 +1,2 @@  --- -- include: certificates/fetch_client_certificates_from_ca.yml +- include_tasks: certificates/fetch_client_certificates_from_ca.yml diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml index 9202e6e48..55fb7f6ea 100644 --- a/roles/etcd/tasks/disable_etcd.yml +++ b/roles/etcd/tasks/disable_etcd.yml @@ -1,2 +1,2 @@  --- -- include: auxiliary/disable_etcd.yml +- include_tasks: auxiliary/disable_etcd.yml diff --git a/roles/etcd/tasks/distribute_ca b/roles/etcd/tasks/distribute_ca deleted file mode 100644 index 040c5f7af..000000000 --- a/roles/etcd/tasks/distribute_ca +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: certificates/distribute_ca.yml diff --git a/roles/etcd/tasks/distribute_ca.yml b/roles/etcd/tasks/distribute_ca.yml new file mode 100644 index 000000000..7d2607844 --- /dev/null +++ b/roles/etcd/tasks/distribute_ca.yml @@ -0,0 +1,2 @@ +--- +- include_tasks: certificates/distribute_ca.yml diff --git a/roles/etcd/tasks/drop_etcdctl.yml b/roles/etcd/tasks/drop_etcdctl.yml index 4c1f609f7..3258ab1a8 100644 --- a/roles/etcd/tasks/drop_etcdctl.yml +++ b/roles/etcd/tasks/drop_etcdctl.yml @@ -1,2 +1,2 @@  --- -- include: auxiliary/drop_etcdctl.yml +- include_tasks: auxiliary/drop_etcdctl.yml diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml deleted file mode 100644 index 513eed17a..000000000 
--- a/roles/etcd/tasks/fetch_backup.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- include: backup/vars.yml - -- include: backup/archive.yml - -- include: backup/sync_backup.yml - -- include: backup/ diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 3e69af314..5ee9335f5 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -7,20 +7,19 @@      etcd_ip: "{{ etcd_ip }}"  - name: setup firewall -  include: firewall.yml -  static: yes +  import_tasks: firewall.yml  - name: Install etcd    package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present    when: not etcd_is_containerized | bool -- include: drop_etcdctl.yml +- include_tasks: drop_etcdctl.yml    when:    - openshift_etcd_etcdctl_profile | default(true) | bool  - block:    - name: Pull etcd container -    command: docker pull {{ openshift.etcd.etcd_image }} +    command: docker pull {{ etcd_image }}      register: pull_result      changed_when: "'Downloaded newer image' in pull_result.stdout" @@ -30,7 +29,7 @@        src: etcd.docker.service    when:    - etcd_is_containerized | bool -  - not openshift.common.is_etcd_system_container | bool +  - not l_is_etcd_system_container | bool  # Start secondary etcd instance for third party integrations  # TODO: Determine an alternative to using thirdparty variable @@ -90,7 +89,7 @@        enabled: no        masked: yes        daemon_reload: yes -    when: not openshift.common.is_etcd_system_container | bool +    when: not l_is_etcd_system_container | bool      register: task_result      failed_when: task_result|failed and 'could not' not in task_result.msg|lower @@ -98,11 +97,11 @@      template:        dest: "/etc/systemd/system/etcd_container.service"        src: etcd.docker.service -    when: not openshift.common.is_etcd_system_container | bool +    when: not l_is_etcd_system_container | bool    - name: Install Etcd system container -    include: system_container.yml -    when: 
openshift.common.is_etcd_system_container | bool +    include_tasks: system_container.yml +    when: l_is_etcd_system_container | bool    when: etcd_is_containerized | bool  - name: Validate permissions on the config dir diff --git a/roles/etcd/tasks/migrate.add_ttls.yml b/roles/etcd/tasks/migrate.add_ttls.yml index bc27e4ea1..1dd3c9269 100644 --- a/roles/etcd/tasks/migrate.add_ttls.yml +++ b/roles/etcd/tasks/migrate.add_ttls.yml @@ -1,2 +1,2 @@  --- -- include: migration/add_ttls.yml +- include_tasks: migration/add_ttls.yml diff --git a/roles/etcd/tasks/migrate.configure_master.yml b/roles/etcd/tasks/migrate.configure_master.yml index 3ada6e362..5be9cebd7 100644 --- a/roles/etcd/tasks/migrate.configure_master.yml +++ b/roles/etcd/tasks/migrate.configure_master.yml @@ -1,2 +1,2 @@  --- -- include: migration/configure_master.yml +- include_tasks: migration/configure_master.yml diff --git a/roles/etcd/tasks/migrate.pre_check.yml b/roles/etcd/tasks/migrate.pre_check.yml index 124d21561..4cb67d322 100644 --- a/roles/etcd/tasks/migrate.pre_check.yml +++ b/roles/etcd/tasks/migrate.pre_check.yml @@ -1,2 +1,2 @@  --- -- include: migration/check.yml +- include_tasks: migration/check.yml diff --git a/roles/etcd/tasks/migrate.yml b/roles/etcd/tasks/migrate.yml index 5d5385873..1a75f63f1 100644 --- a/roles/etcd/tasks/migrate.yml +++ b/roles/etcd/tasks/migrate.yml @@ -1,2 +1,2 @@  --- -- include: migration/migrate.yml +- include_tasks: migration/migrate.yml diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml index 14625e49e..4bdc6bcc3 100644 --- a/roles/etcd/tasks/migration/add_ttls.yml +++ b/roles/etcd/tasks/migration/add_ttls.yml @@ -6,7 +6,7 @@  - set_fact:      accessTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.accessTokenMaxAgeSeconds | default(86400) }}" -    authroizeTokenMaxAgeSeconds: "{{ 
(g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authroizeTokenMaxAgeSeconds | default(500) }}" +    authorizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authorizeTokenMaxAgeSeconds | default(500) }}"      controllerLeaseTTL: "{{ (g_master_config_output.content|b64decode|from_yaml).controllerLeaseTTL | default(30) }}"  - name: Re-introduce leases (as a replacement for key TTLs) @@ -29,6 +29,6 @@      - keys: "/openshift.io/oauth/accesstokens"        ttl: "{{ accessTokenMaxAgeSeconds }}s"      - keys: "/openshift.io/oauth/authorizetokens" -      ttl: "{{ authroizeTokenMaxAgeSeconds }}s" +      ttl: "{{ authorizeTokenMaxAgeSeconds }}s"      - keys: "/openshift.io/leases/controllers"        ttl: "{{ controllerLeaseTTL }}s" diff --git a/roles/etcd/tasks/migration/check.yml b/roles/etcd/tasks/migration/check.yml index 5c45e5ae1..8ef81da28 100644 --- a/roles/etcd/tasks/migration/check.yml +++ b/roles/etcd/tasks/migration/check.yml @@ -1,7 +1,7 @@  ---  # Check the cluster is healthy -- include: check_cluster_health.yml +- include_tasks: check_cluster_health.yml  # Check if there is at least one v2 snapshot  - name: Check if there is at least one v2 snapshot @@ -39,7 +39,7 @@  # - with_items not supported over block  # Check the cluster status for the first time -- include: check_cluster_status.yml +- include_tasks: check_cluster_status.yml  # Check the cluster status for the second time  - block: @@ -50,7 +50,7 @@        seconds: 5      when: not l_etcd_cluster_status_ok | bool -  - include: check_cluster_status.yml +  - include_tasks: check_cluster_status.yml      when: not l_etcd_cluster_status_ok | bool @@ -63,5 +63,5 @@        seconds: 5      when: not l_etcd_cluster_status_ok | bool -  - include: check_cluster_status.yml +  - include_tasks: check_cluster_status.yml      when: not l_etcd_cluster_status_ok | bool diff --git a/roles/etcd/tasks/remove_ca_certificates.yml 
b/roles/etcd/tasks/remove_ca_certificates.yml index 36df1a1cc..c1ea4e6c9 100644 --- a/roles/etcd/tasks/remove_ca_certificates.yml +++ b/roles/etcd/tasks/remove_ca_certificates.yml @@ -1,2 +1,2 @@  --- -- include: certificates/remove_ca_certificates.yml +- include_tasks: certificates/remove_ca_certificates.yml diff --git a/roles/etcd/tasks/remove_generated_certificates.yml b/roles/etcd/tasks/remove_generated_certificates.yml index b10a4b32d..8cdeea187 100644 --- a/roles/etcd/tasks/remove_generated_certificates.yml +++ b/roles/etcd/tasks/remove_generated_certificates.yml @@ -1,2 +1,2 @@  --- -- include: certificates/remove_generated_certificates.yml +- include_tasks: certificates/remove_generated_certificates.yml diff --git a/roles/etcd/tasks/restart.yml b/roles/etcd/tasks/restart.yml new file mode 100644 index 000000000..d4a016eec --- /dev/null +++ b/roles/etcd/tasks/restart.yml @@ -0,0 +1,21 @@ +--- + +- name: restart etcd +  service: +    name: "{{ etcd_service }}" +    state: restarted +  when: +    - not g_etcd_certificates_expired | default(false) | bool + +- name: stop etcd +  service: +    name: "{{ etcd_service }}" +    state: stopped +  when: +    - g_etcd_certificates_expired | default(false) | bool +- name: start etcd +  service: +    name: "{{ etcd_service }}" +    state: started +  when: +    - g_etcd_certificates_expired | default(false) | bool diff --git a/roles/etcd/tasks/retrieve_ca_certificates.yml b/roles/etcd/tasks/retrieve_ca_certificates.yml index bd6c4ec85..2184e669c 100644 --- a/roles/etcd/tasks/retrieve_ca_certificates.yml +++ b/roles/etcd/tasks/retrieve_ca_certificates.yml @@ -1,2 +1,2 @@  --- -- include: certificates/retrieve_ca_certificates.yml +- include_tasks: certificates/retrieve_ca_certificates.yml diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml index ae26079f9..75c35d59e 100644 --- a/roles/etcd/tasks/server_certificates.yml +++ b/roles/etcd/tasks/server_certificates.yml @@ -1,6 +1,6 
@@  --- -- include: ca.yml +- include_tasks: ca.yml    when:    - etcd_ca_setup | default(True) | bool -- include: certificates/fetch_server_certificates_from_ca.yml +- include_tasks: certificates/fetch_server_certificates_from_ca.yml diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index f71d9b551..82ac4fc84 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -5,7 +5,7 @@      tasks_from: proxy  - name: Pull etcd system container -  command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }} +  command: atomic pull --storage=ostree {{ etcd_image }}    register: pull_result    changed_when: "'Pulling layer' in pull_result.stdout" @@ -57,7 +57,7 @@  - name: Install or Update Etcd system container package    oc_atomic_container:      name: etcd -    image: "{{ openshift.etcd.etcd_image }}" +    image: "{{ etcd_image }}"      state: latest      values:        - ETCD_DATA_DIR=/var/lib/etcd diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml index 9e69027eb..35385cb9a 100644 --- a/roles/etcd/tasks/upgrade_image.yml +++ b/roles/etcd/tasks/upgrade_image.yml @@ -1,2 +1,2 @@  --- -- include: upgrade/upgrade_image.yml +- include_tasks: upgrade/upgrade_image.yml diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml index 29603d2b6..fbd3cd919 100644 --- a/roles/etcd/tasks/upgrade_rpm.yml +++ b/roles/etcd/tasks/upgrade_rpm.yml @@ -1,2 +1,2 @@  --- -- include: upgrade/upgrade_rpm.yml +- include_tasks: upgrade/upgrade_rpm.yml diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml new file mode 100644 index 000000000..fe1e418d8 --- /dev/null +++ b/roles/etcd/tasks/version_detect.yml @@ -0,0 +1,55 @@ +--- +- block: +  - name: Record RPM based etcd version +    command: rpm -qa --qf '%{version}' etcd\* +    args: +      warn: no +    register: etcd_rpm_version +    failed_when: false +    # 
AUDIT:changed_when: `false` because we are only inspecting +    # state, not manipulating anything +    changed_when: false +  - debug: +      msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected" +  when: +  - not openshift.common.is_containerized | bool + +- block: +  - name: Record containerized etcd version (docker) +    command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* +    register: etcd_container_version_docker +    failed_when: false +    # AUDIT:changed_when: `false` because we are only inspecting +    # state, not manipulating anything +    changed_when: false +    when: +    - not l_is_etcd_system_container | bool + +    # Given a register variables is set even if the whwen condition +    # is false, we need to set etcd_container_version separately +  - set_fact: +      etcd_container_version: "{{ etcd_container_version_docker.stdout }}" +    when: +    - not l_is_etcd_system_container | bool + +  - name: Record containerized etcd version (runc) +    command: runc exec etcd rpm -qa --qf '%{version}' etcd\* +    register: etcd_container_version_runc +    failed_when: false +    # AUDIT:changed_when: `false` because we are only inspecting +    # state, not manipulating anything +    changed_when: false +    when: +    - l_is_etcd_system_container | bool + +    # Given a register variables is set even if the whwen condition +    # is false, we need to set etcd_container_version separately +  - set_fact: +      etcd_container_version: "{{ etcd_container_version_runc.stdout }}" +    when: +    - l_is_etcd_system_container | bool + +  - debug: +      msg: "Etcd containerized version {{ etcd_container_version }} detected" +  when: +  - openshift.common.is_containerized | bool diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index adeca7a91..99ae37319 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -7,7 +7,7 @@ PartOf={{ 
openshift.docker.service_name }}.service  [Service]  EnvironmentFile={{ etcd_conf_file }}  ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }} -ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} +ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ etcd_image }}  ExecStop=/usr/bin/docker stop {{ etcd_service }}  SyslogIdentifier=etcd_container  Restart=always diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md index f8588c4bf..68c0357b6 100644 --- a/roles/installer_checkpoint/README.md +++ b/roles/installer_checkpoint/README.md @@ -64,11 +64,11 @@ phase are stored in the `phase_attributes` variable.              },              'installer_phase_etcd': {                  'title': 'etcd Install', -                'playbook': 'playbooks/byo/openshift-etcd/config.yml' +                'playbook': 'playbooks/openshift-etcd/config.yml'              },              'installer_phase_nfs': {                  'title': 'NFS Install', -                'playbook': 'playbooks/byo/openshift-nfs/config.yml' +                'playbook': 'playbooks/openshift-nfs/config.yml'              },              #...          
} @@ -160,7 +160,7 @@ Health Check               : Complete (0:01:10)  etcd Install               : Complete (0:02:58)  Master Install             : Complete (0:09:20)  Master Additional Install  : In Progress (0:20:04) -    This phase can be restarted by running: playbooks/byo/openshift-master/additional_config.yml +    This phase can be restarted by running: playbooks/openshift-master/additional_config.yml  ```  [set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index d8bdea343..57444a2a5 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -50,27 +50,27 @@ class CallbackModule(CallbackBase):              },              'installer_phase_etcd': {                  'title': 'etcd Install', -                'playbook': 'playbooks/byo/openshift-etcd/config.yml' +                'playbook': 'playbooks/openshift-etcd/config.yml'              },              'installer_phase_nfs': {                  'title': 'NFS Install', -                'playbook': 'playbooks/byo/openshift-nfs/config.yml' +                'playbook': 'playbooks/openshift-nfs/config.yml'              },              'installer_phase_loadbalancer': {                  'title': 'Load balancer Install', -                'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml' +                'playbook': 'playbooks/openshift-loadbalancer/config.yml'              },              'installer_phase_master': {                  'title': 'Master Install', -                'playbook': 'playbooks/byo/openshift-master/config.yml' +                'playbook': 'playbooks/openshift-master/config.yml'              },              'installer_phase_master_additional': {                  'title': 'Master Additional Install', -                
'playbook': 'playbooks/byo/openshift-master/additional_config.yml' +                'playbook': 'playbooks/openshift-master/additional_config.yml'              },              'installer_phase_node': {                  'title': 'Node Install', -                'playbook': 'playbooks/byo/openshift-node/config.yml' +                'playbook': 'playbooks/openshift-node/config.yml'              },              'installer_phase_glusterfs': {                  'title': 'GlusterFS Install', @@ -78,11 +78,11 @@ class CallbackModule(CallbackBase):              },              'installer_phase_hosted': {                  'title': 'Hosted Install', -                'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml' +                'playbook': 'playbooks/openshift-hosted/config.yml'              },              'installer_phase_metrics': {                  'title': 'Metrics Install', -                'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml' +                'playbook': 'playbooks/openshift-metrics/config.yml'              },              'installer_phase_logging': {                  'title': 'Logging Install', @@ -90,15 +90,15 @@ class CallbackModule(CallbackBase):              },              'installer_phase_prometheus': {                  'title': 'Prometheus Install', -                'playbook': 'playbooks/byo/openshift-cluster/openshift-prometheus.yml' +                'playbook': 'playbooks/openshift-prometheus/config.yml'              },              'installer_phase_servicecatalog': {                  'title': 'Service Catalog Install', -                'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml' +                'playbook': 'playbooks/openshift-service-catalog/config.yml'              },              'installer_phase_management': {                  'title': 'Management Install', -                'playbook': 'playbooks/byo/openshift-management/config.yml' +                'playbook': 
'playbooks/openshift-management/config.yml'              },          } diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml index 55ab16f74..1cc6d2375 100644 --- a/roles/kuryr/tasks/master.yaml +++ b/roles/kuryr/tasks/master.yaml @@ -1,6 +1,6 @@  --- -- name: Perform OpenShit ServiceAccount config -  include: serviceaccount.yaml +- name: Perform OpenShift ServiceAccount config +  include_tasks: serviceaccount.yaml  - name: Create kuryr manifests tempdir    command: mktemp -d diff --git a/roles/nickhammond.logrotate/templates/logrotate.d.j2 b/roles/nickhammond.logrotate/templates/logrotate.d.j2 index 6453be6b2..1ad1c595c 100644 --- a/roles/nickhammond.logrotate/templates/logrotate.d.j2 +++ b/roles/nickhammond.logrotate/templates/logrotate.d.j2 @@ -7,7 +7,7 @@    {% endfor -%}    {% endif %}    {%- if item.scripts is defined -%} -  {%- for name, script in item.scripts.iteritems() -%} +  {%- for name, script in item.scripts.items() -%}    {{ name }}      {{ script }}    endscript diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml index f3c487132..c264427de 100644 --- a/roles/nuage_master/tasks/main.yaml +++ b/roles/nuage_master/tasks/main.yaml @@ -1,7 +1,6 @@  ---  - name: setup firewall -  include: firewall.yml -  static: yes +  import_tasks: firewall.yml  - name: Set the Nuage certificate directory fact for Atomic hosts    set_fact: @@ -62,7 +61,7 @@    become: yes    file: path={{ nuage_mon_rest_server_logdir }} state=directory -- include: serviceaccount.yml +- include_tasks: serviceaccount.yml  - name: Download the certs and keys    become: yes @@ -82,7 +81,7 @@      - nuage.key      - nuage.kubeconfig -- include: certificates.yml +- include_tasks: certificates.yml  - name: Install Nuage VSD user certificate    become: yes diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml index 9db9dbb6a..c6b7a9b10 100644 --- a/roles/nuage_node/tasks/main.yaml +++ 
b/roles/nuage_node/tasks/main.yaml @@ -31,7 +31,7 @@      - nuage.key      - nuage.kubeconfig -- include: certificates.yml +- include_tasks: certificates.yml  - name: Add additional Docker mounts for Nuage for atomic hosts    become: yes @@ -44,8 +44,7 @@      - restart node    ignore_errors: true -- include: iptables.yml +- include_tasks: iptables.yml  - name: setup firewall -  include: firewall.yml -  static: yes +  import_tasks: firewall.yml diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml index ed97d539c..82da0639e 100644 --- a/roles/openshift_cli/defaults/main.yml +++ b/roles/openshift_cli/defaults/main.yml @@ -1 +1,6 @@  --- +system_images_registry_dict: +  openshift-enterprise: "registry.access.redhat.com" +  origin: "docker.io" + +system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}" diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 14d8a3325..6aa15d568 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -12,13 +12,13 @@  - block:    - name: Pull CLI Image      command: > -      docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} +      docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}      register: pull_result      changed_when: "'Downloaded newer image' in pull_result.stdout"    - name: Copy client binaries/symlinks out of CLI image for use on the host      openshift_container_binary_sync: -      image: "{{ openshift.common.cli_image }}" +      image: "{{ openshift_cli_image }}"        tag: "{{ openshift_image_tag }}"        backend: "docker"    when: @@ -28,13 +28,13 @@  - block:    - name: Pull CLI Image      command: > -      atomic pull --storage ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }} +      atomic 
pull --storage ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}:{{ openshift_image_tag }}      register: pull_result      changed_when: "'Pulling layer' in pull_result.stdout"    - name: Copy client binaries/symlinks out of CLI image for use on the host      openshift_container_binary_sync: -      image: "{{ '' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}" +      image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}"        tag: "{{ openshift_image_tag }}"        backend: "atomic"    when: diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml index ab3055c8b..dff492a69 100644 --- a/roles/openshift_cloud_provider/tasks/main.yml +++ b/roles/openshift_cloud_provider/tasks/main.yml @@ -11,11 +11,11 @@      state: directory    when: has_cloudprovider | bool -- include: openstack.yml +- include_tasks: openstack.yml    when: cloudprovider_is_openstack | bool -- include: aws.yml +- include_tasks: aws.yml    when: cloudprovider_is_aws | bool -- include: gce.yml +- include_tasks: gce.yml    when: cloudprovider_is_gce | bool diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml index de36b201b..7cc548f69 100644 --- a/roles/openshift_etcd/meta/main.yml +++ b/roles/openshift_etcd/meta/main.yml @@ -13,7 +13,6 @@ galaxy_info:    - cloud  dependencies:  - role: openshift_etcd_facts -- role: openshift_clock  - role: openshift_docker    when: openshift.common.is_containerized | bool  - role: etcd diff --git a/roles/openshift_etcd_facts/tasks/main.yml b/roles/openshift_etcd_facts/tasks/main.yml index 22fb39006..ed97d539c 100644 --- a/roles/openshift_etcd_facts/tasks/main.yml +++ b/roles/openshift_etcd_facts/tasks/main.yml @@ -1,5 +1 @@  --- -- openshift_facts: -    role: etcd -  
  local_facts: -      etcd_image: "{{ osm_etcd_image | default(None) }}" diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml index 5add25b45..21801b994 100644 --- a/roles/openshift_excluder/tasks/disable.yml +++ b/roles/openshift_excluder/tasks/disable.yml @@ -2,11 +2,11 @@  - when: r_openshift_excluder_verify_upgrade    block:    - name: Include verify_upgrade.yml when upgrading -    include: verify_upgrade.yml +    include_tasks: verify_upgrade.yml  # unexclude the current openshift/origin-excluder if it is installed so it can be updated  - name: Disable excluders before the upgrade to remove older excluding expressions -  include: unexclude.yml +  include_tasks: unexclude.yml    vars:      # before the docker excluder can be updated, it needs to be disabled      # to remove older excluded packages that are no longer excluded @@ -15,12 +15,12 @@  # Install any excluder that is enabled  - name: Include install.yml -  include: install.yml +  include_tasks: install.yml  # And finally adjust an excluder in order to update host components correctly. 
First  # exclude then unexclude  - name: Include exclude.yml -  include: exclude.yml +  include_tasks: exclude.yml    vars:      # Enable the docker excluder only if it is overridden      # BZ #1430612: docker excluders should be enabled even during installation and upgrade @@ -30,7 +30,7 @@  # All excluders that are to be disabled are disabled  - name: Include unexclude.yml -  include: unexclude.yml +  include_tasks: unexclude.yml    vars:      # If the docker override  is not set, default to the generic behaviour      # BZ #1430612: docker excluders should be enabled even during installation and upgrade diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml index fce44cfb5..7c3742a06 100644 --- a/roles/openshift_excluder/tasks/enable.yml +++ b/roles/openshift_excluder/tasks/enable.yml @@ -1,6 +1,6 @@  ---  - name: Install excluders -  include: install.yml +  include_tasks: install.yml  - name: Enable excluders -  include: exclude.yml +  include_tasks: exclude.yml diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml index db20b4012..93d6ef149 100644 --- a/roles/openshift_excluder/tasks/main.yml +++ b/roles/openshift_excluder/tasks/main.yml @@ -32,7 +32,7 @@      - r_openshift_excluder_upgrade_target is not defined    - name: Include main action task file -    include: "{{ r_openshift_excluder_action }}.yml" +    include_tasks: "{{ r_openshift_excluder_action }}.yml"    when:    - not ostree_booted.stat.exists | bool diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml index 42026664a..b55a9af23 100644 --- a/roles/openshift_excluder/tasks/verify_upgrade.yml +++ b/roles/openshift_excluder/tasks/verify_upgrade.yml @@ -1,12 +1,12 @@  ---  - name: Verify Docker Excluder version -  include: verify_excluder.yml +  include_tasks: verify_excluder.yml    vars:      excluder: "{{ r_openshift_excluder_service_type 
}}-docker-excluder"    when: r_openshift_excluder_enable_docker_excluder | bool  - name: Verify OpenShift Excluder version -  include: verify_excluder.yml +  include_tasks: verify_excluder.yml    vars:      excluder: "{{ r_openshift_excluder_service_type }}-excluder"    when: r_openshift_excluder_enable_openshift_excluder | bool diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml new file mode 100644 index 000000000..7064d727a --- /dev/null +++ b/roles/openshift_facts/defaults/main.yml @@ -0,0 +1,6 @@ +--- +openshift_cli_image_dict: +  origin: 'openshift/origin' +  openshift-enterprise: 'openshift3/ose' + +openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 99ebb7e36..44983cfd6 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -446,24 +446,6 @@ def normalize_provider_facts(provider, metadata):      return facts -def set_node_schedulability(facts): -    """ Set schedulable facts if not already present in facts dict -        Args: -            facts (dict): existing facts -        Returns: -            dict: the facts dict updated with the generated schedulable -            facts if they were not already present - -    """ -    if 'node' in facts: -        if 'schedulable' not in facts['node']: -            if 'master' in facts: -                facts['node']['schedulable'] = False -            else: -                facts['node']['schedulable'] = True -    return facts - -  # pylint: disable=too-many-branches  def set_selectors(facts):      """ Set selectors facts if not already present in facts dict @@ -516,21 +498,6 @@ def set_selectors(facts):      return facts -def set_dnsmasq_facts_if_unset(facts): -    """ Set dnsmasq facts if not already present in facts -    Args: -        facts 
(dict) existing facts -    Returns: -        facts (dict) updated facts with values set if not previously set -    """ - -    if 'common' in facts: -        if 'master' in facts and 'dns_port' not in facts['master']: -            facts['master']['dns_port'] = 8053 - -    return facts - -  def set_project_cfg_facts_if_unset(facts):      """ Set Project Configuration facts if not already present in facts dict              dict: @@ -1563,7 +1530,8 @@ def set_builddefaults_facts(facts):                  # Scaffold out the full expected datastructure                  facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}              facts['master']['admission_plugin_config'].update(builddefaults['config']) -            delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env']) +            if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']: +                delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])      return facts @@ -1627,20 +1595,16 @@ def set_container_facts_if_unset(facts):      deployment_type = facts['common']['deployment_type']      if deployment_type == 'openshift-enterprise':          master_image = 'openshift3/ose' -        cli_image = master_image          node_image = 'openshift3/node'          ovs_image = 'openshift3/openvswitch' -        etcd_image = 'registry.access.redhat.com/rhel7/etcd'          pod_image = 'openshift3/ose-pod'          router_image = 'openshift3/ose-haproxy-router'          registry_image = 'openshift3/ose-docker-registry'          deployer_image = 'openshift3/ose-deployer'      else:          master_image = 'openshift/origin' -        cli_image = master_image          node_image = 'openshift/node'          ovs_image = 'openshift/openvswitch' -        etcd_image = 'registry.access.redhat.com/rhel7/etcd'          pod_image = 'openshift/origin-pod'        
  router_image = 'openshift/origin-haproxy-router'          registry_image = 'openshift/origin-docker-registry' @@ -1657,8 +1621,6 @@ def set_container_facts_if_unset(facts):      if 'is_containerized' not in facts['common']:          facts['common']['is_containerized'] = facts['common']['is_atomic'] -    if 'cli_image' not in facts['common']: -        facts['common']['cli_image'] = cli_image      if 'pod_image' not in facts['common']:          facts['common']['pod_image'] = pod_image      if 'router_image' not in facts['common']: @@ -1667,8 +1629,6 @@ def set_container_facts_if_unset(facts):          facts['common']['registry_image'] = registry_image      if 'deployer_image' not in facts['common']:          facts['common']['deployer_image'] = deployer_image -    if 'etcd' in facts and 'etcd_image' not in facts['etcd']: -        facts['etcd']['etcd_image'] = etcd_image      if 'master' in facts and 'master_image' not in facts['master']:          facts['master']['master_image'] = master_image          facts['master']['master_system_image'] = master_image @@ -1841,7 +1801,6 @@ class OpenShiftFacts(object):          facts['current_config'] = get_current_config(facts)          facts = set_url_facts_if_unset(facts)          facts = set_project_cfg_facts_if_unset(facts) -        facts = set_node_schedulability(facts)          facts = set_selectors(facts)          facts = set_identity_providers_if_unset(facts)          facts = set_deployment_facts_if_unset(facts) @@ -1851,7 +1810,6 @@ class OpenShiftFacts(object):          facts = build_controller_args(facts)          facts = build_api_server_args(facts)          facts = set_version_facts_if_unset(facts) -        facts = set_dnsmasq_facts_if_unset(facts)          facts = set_aggregate_facts(facts)          facts = set_etcd_facts_if_unset(facts)          facts = set_proxy_facts(facts) @@ -2256,14 +2214,27 @@ class OpenShiftFacts(object):                  oo_env_facts = dict()                  current_level = oo_env_facts   
               keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:] +                  if len(keys) > 0 and keys[0] != self.role:                      continue -                for key in keys: -                    if key == keys[-1]: -                        current_level[key] = value -                    elif key not in current_level: -                        current_level[key] = dict() -                        current_level = current_level[key] + +                # Build a dictionary from the split fact keys. +                # After this loop oo_env_facts is the resultant dictionary. +                # For example: +                # fact = "openshift_metrics_install_metrics" +                # value = 'true' +                # keys = ['metrics', 'install', 'metrics'] +                # result = {'metrics': {'install': {'metrics': 'true'}}} +                for i, _ in enumerate(keys): +                    # This is the last key. Set the value. +                    if i == (len(keys) - 1): +                        current_level[keys[i]] = value +                    # This is a key other than the last key. Set as +                    # dictionary and continue. 
+                    else: +                        current_level[keys[i]] = dict() +                        current_level = current_level[keys[i]] +                  facts_to_set = merge_facts(orig=facts_to_set,                                             new=oo_env_facts,                                             additive_facts_to_overwrite=[], diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index c2954fde1..81241535b 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml @@ -12,7 +12,7 @@      namespace: "{{ openshift_hosted_registry_namespace }}"      state: list      kind: pod -    selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}" +    selector: "{% for label, value in registry_dc.results.results[0].spec.selector.items() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"    register: registry_pods    until:    - "registry_pods.results.results[0]['items'] | count > 0" @@ -79,14 +79,7 @@        - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'    when: openshift.hosted.registry.storage.glusterfs.swap -- name: Unmount registry volume +- name: Unmount registry volume and clean up mount point/fstab    mount: -    state: unmounted -    name: "{{ mktemp.stdout }}" - -- name: Delete temp mount directory -  file: -    dest: "{{ mktemp.stdout }}"      state: absent -  changed_when: False -  check_mode: no +    name: "{{ mktemp.stdout }}" diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index 69b061fc5..c87a327a4 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -1,7 +1,6 @@  ---  - name: setup firewall -  include: firewall.yml -  static: yes +  import_tasks: firewall.yml  - name: 
Install haproxy    package: name=haproxy state=present diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2 index 462128366..8acff8141 100644 --- a/roles/openshift_logging_curator/templates/curator.j2 +++ b/roles/openshift_logging_curator/templates/curator.j2 @@ -30,7 +30,7 @@ spec:        serviceAccountName: aggregated-logging-curator  {% if curator_node_selector is iterable and curator_node_selector | length > 0 %}        nodeSelector: -{% for key, value in curator_node_selector.iteritems() %} +{% for key, value in curator_node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}  {% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 0c7d8b46e..0bfa9e85b 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -34,7 +34,7 @@ spec:  {% endfor %}  {% if es_node_selector is iterable and es_node_selector | length > 0 %}        nodeSelector: -{% for key, value in es_node_selector.iteritems() %} +{% for key, value in es_node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}  {% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2 index 063f9c5ae..3c6896df4 100644 --- a/roles/openshift_logging_elasticsearch/templates/pvc.j2 +++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2 @@ -6,7 +6,7 @@ metadata:      logging-infra: support  {% if annotations is defined %}    annotations: -{% for key,value in annotations.iteritems() %} +{% for key,value in annotations.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} @@ -14,7 +14,7 @@ spec:  {% if pv_selector is defined and pv_selector is mapping %}    selector:      matchLabels: -{% for key,value in pv_selector.iteritems() %} +{% for key,value in pv_selector.items() %}        
{{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 index cf8a9e65f..d2e8b8bcb 100644 --- a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 +++ b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 @@ -4,7 +4,7 @@ metadata:    name: "{{obj_name}}"  {% if labels is defined%}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 index 5a4f7f762..3bd29163b 100644 --- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 +++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 @@ -42,7 +42,7 @@ objects:          component: eventrouter          logging-infra: eventrouter          provider: openshift -      replicas: ${REPLICAS} +      replicas: "${{ '{{' }}REPLICAS{{ '}}' }}"        template:          metadata:            labels: @@ -55,7 +55,7 @@ objects:            serviceAccountName: aggregated-logging-eventrouter  {% if node_selector is iterable and node_selector | length > 0 %}            nodeSelector: -{% for key, value in node_selector.iteritems() %} +{% for key, value in node_selector.items() %}              {{ key }}: "{{ value }}"  {% endfor %}  {% endif %} diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 index 4ff86729a..57d216373 100644 --- a/roles/openshift_logging_kibana/templates/kibana.j2 +++ b/roles/openshift_logging_kibana/templates/kibana.j2 @@ -29,7 +29,7 @@ spec:        serviceAccountName: aggregated-logging-kibana  {% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}        nodeSelector: -{% for key, 
value in kibana_node_selector.iteritems() %} +{% for key, value in kibana_node_selector.items() %}          {{ key }}: "{{ value }}"  {% endfor %}  {% endif %} diff --git a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/route_reencrypt.j2 index cf8a9e65f..d2e8b8bcb 100644 --- a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 +++ b/roles/openshift_logging_kibana/templates/route_reencrypt.j2 @@ -4,7 +4,7 @@ metadata:    name: "{{obj_name}}"  {% if labels is defined%}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2 index cfb13d59b..2337c33d5 100644 --- a/roles/openshift_logging_mux/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/mux.j2 @@ -29,7 +29,7 @@ spec:        serviceAccountName: aggregated-logging-mux  {% if mux_node_selector is iterable and mux_node_selector | length > 0 %}        nodeSelector: -{% for key, value in mux_node_selector.iteritems() %} +{% for key, value in mux_node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}  {% endif %} @@ -59,7 +59,7 @@ spec:  {%   endif %}  {% endif %}          ports: -        - containerPort: "{{ openshift_logging_mux_port }}" +        - containerPort: {{ openshift_logging_mux_port }}            name: mux-forward          volumeMounts:          - name: config diff --git a/roles/openshift_manage_node/defaults/main.yml b/roles/openshift_manage_node/defaults/main.yml new file mode 100644 index 000000000..f0e728a3f --- /dev/null +++ b/roles/openshift_manage_node/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# openshift_manage_node_is_master is set at the play level. +openshift_manage_node_is_master: False + +# Default is to be schedulable except for master nodes. 
+l_openshift_manage_schedulable: "{{ openshift_schedulable | default(not openshift_manage_node_is_master) }}" diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index fbbac1176..247757ca9 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -37,7 +37,7 @@  - name: Set node schedulability    oc_adm_manage_node:      node: "{{ openshift.node.nodename | lower }}" -    schedulable: "{{ 'true' if openshift.node.schedulable | bool else 'false' }}" +    schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}"    retries: 10    delay: 5    register: node_schedulable diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 3fb94fff8..1fe7086bc 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -7,6 +7,16 @@ openshift_master_debug_level: "{{ debug_level | default(2) }}"  r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"  r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +system_images_registry_dict: +  openshift-enterprise: "registry.access.redhat.com" +  origin: "docker.io" + +system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}" + +l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + +openshift_master_dns_port: 8053 +  openshift_node_ips: []  r_openshift_master_clean_install: false  r_openshift_master_etcd3_storage: false @@ -18,9 +28,9 @@ default_r_openshift_master_os_firewall_allow:  - service: api controllers https    port: "{{ openshift.master.controllers_port }}/tcp"  - service: skydns tcp -  port: "{{ openshift.master.dns_port }}/tcp" +  port: "{{ openshift_master_dns_port }}/tcp"  - service: skydns udp -  port: "{{ 
openshift.master.dns_port }}/udp" +  port: "{{ openshift_master_dns_port }}/udp"  - service: etcd embedded    port: 4001/tcp    cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index b6d3539b1..d570a1c7f 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -31,8 +31,7 @@    - openshift.common.is_containerized | bool  - name: Open up firewall ports -  include: firewall.yml -  static: yes +  import_tasks: firewall.yml  - name: Install Master package    package: @@ -172,16 +171,16 @@        no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"  - name: Update journald config -  include: journald.yml +  include_tasks: journald.yml  - name: Install the systemd units -  include: systemd_units.yml +  include_tasks: systemd_units.yml  - name: Install Master system container -  include: system_container.yml +  include_tasks: system_container.yml    when:    - openshift.common.is_containerized | bool -  - openshift.common.is_master_system_container | bool +  - l_is_master_system_container | bool  - name: Create session secrets file    template: @@ -212,10 +211,10 @@    - restart master api    - restart master controllers -- include: bootstrap_settings.yml +- include_tasks: bootstrap_settings.yml    when: openshift_master_bootstrap_enabled | default(False) -- include: set_loopback_context.yml +- include_tasks: set_loopback_context.yml  - name: Start and enable master api on first master    systemd: @@ -273,7 +272,7 @@  # A separate wait is required here for native HA since notifies will  # be resolved after all tasks in the role. 
-- include: check_master_api_is_ready.yml +- include_tasks: check_master_api_is_ready.yml    when:    - openshift.master.cluster_method == 'native'    - master_api_service_status_changed | bool @@ -323,5 +322,5 @@    - l_install_result | changed  - name: node bootstrap settings -  include: bootstrap.yml +  include_tasks: bootstrap.yml    when: openshift_master_bootstrap_enabled | default(False) diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml index 843352532..23386f11b 100644 --- a/roles/openshift_master/tasks/system_container.yml +++ b/roles/openshift_master/tasks/system_container.yml @@ -6,7 +6,7 @@  - name: Pre-pull master system container image    command: > -    atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} +    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}    register: l_pull_result    changed_when: "'Pulling layer' in l_pull_result.stdout" @@ -18,7 +18,7 @@  - name: Install or Update HA api master system container    oc_atomic_container:      name: "{{ openshift.common.service_type }}-master-api" -    image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"      state: latest      values:      - COMMAND=api @@ -26,7 +26,7 @@  - name: Install or Update HA controller master system container    oc_atomic_container:      name: "{{ openshift.common.service_type }}-master-controllers" -  
  image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"      state: latest      values:      - COMMAND=controllers diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index b0fa72f19..9d11ed574 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -9,7 +9,7 @@    when:    - openshift.common.is_containerized | bool -- include: registry_auth.yml +- include_tasks: registry_auth.yml  - name: Disable the legacy master service if it exists    systemd: @@ -26,7 +26,7 @@    ignore_errors: true    when:    - openshift.master.cluster_method == "native" -  - not openshift.common.is_master_system_container | bool +  - not l_is_master_system_container | bool  # This is the image used for both HA and non-HA clusters:  - name: Pre-pull master image @@ -36,7 +36,7 @@    changed_when: "'Downloaded newer image' in l_pull_result.stdout"    when:    - openshift.common.is_containerized | bool -  - not openshift.common.is_master_system_container | bool +  - not l_is_master_system_container | bool  - name: Create the ha systemd unit files    template: @@ -44,7 +44,7 @@      dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"    when:    - openshift.master.cluster_method == "native" -  - not openshift.common.is_master_system_container | bool +  - not l_is_master_system_container | bool    with_items:    - api    - controllers @@ -64,7 +64,7 @@    - controllers    when:    - openshift.master.cluster_method == "native" -  - not openshift.common.is_master_system_container | bool +  - not l_is_master_system_container | 
bool  - name: Preserve Master API Proxy Config options    command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml index 92371921d..f84cf2f6e 100644 --- a/roles/openshift_master/tasks/upgrade.yml +++ b/roles/openshift_master/tasks/upgrade.yml @@ -1,16 +1,16 @@  --- -- include: upgrade/rpm_upgrade.yml +- include_tasks: upgrade/rpm_upgrade.yml    when: not openshift.common.is_containerized | bool -- include: upgrade/upgrade_scheduler.yml +- include_tasks: upgrade/upgrade_scheduler.yml  # master_config_hook is passed in from upgrade play. -- include: "upgrade/{{ master_config_hook }}" +- include_tasks: "upgrade/{{ master_config_hook }}"    when: master_config_hook is defined -- include: journald.yml +- include_tasks: journald.yml -- include: systemd_units.yml +- include_tasks: systemd_units.yml  - name: Check for ca-bundle.crt    stat: diff --git a/roles/openshift_master/templates/htpasswd.j2 b/roles/openshift_master/templates/htpasswd.j2 index ba2c02e20..7e2e05076 100644 --- a/roles/openshift_master/templates/htpasswd.j2 +++ b/roles/openshift_master/templates/htpasswd.j2 @@ -1,5 +1,5 @@  {% if 'htpasswd_users' in openshift.master %} -{%   for user,pass in openshift.master.htpasswd_users.iteritems() %} +{%   for user,pass in openshift.master.htpasswd_users.items() %}  {{     user ~ ':' ~ pass }}  {%   endfor %}  {% endif %} diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 629fe3286..d5dd9e9c0 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -65,7 +65,7 @@ disabledFeatures: {{ openshift.master.disabled_features | to_json }}  {% endif %}  {% if openshift.master.embedded_dns | bool %}  dnsConfig: -  bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }} +  bindAddress: {{ 
openshift.master.bind_addr }}:{{ openshift_master_dns_port }}    bindNetwork: tcp4  {% endif %}  etcdClientInfo: diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml index 40705d357..41bfc72cb 100644 --- a/roles/openshift_master_cluster/tasks/main.yml +++ b/roles/openshift_master_cluster/tasks/main.yml @@ -10,5 +10,5 @@    failed_when: false    when: openshift.master.cluster_method == "pacemaker" -- include: configure.yml +- include_tasks: configure.yml    when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr" diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index cf0be3bef..39d571358 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -55,8 +55,6 @@        embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"        embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"        embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}" -      # defaults to 8053 when using dnsmasq in 1.2/3.2 -      dns_port: "{{ openshift_master_dns_port | default(None) }}"        bind_addr: "{{ openshift_master_bind_addr | default(None) }}"        pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}"        session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}" diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml index 3dc15d58b..bb842d710 100644 --- a/roles/openshift_metrics/tasks/generate_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_certificates.yaml @@ -8,4 +8,4 @@      --serial='{{ mktemp.stdout }}/ca.serial.txt'      --name="metrics-signer@{{lookup('pipe','date +%s')}}" -- include: generate_hawkular_certificates.yaml +- include_tasks: generate_hawkular_certificates.yaml diff 
--git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index 31129a6ac..0fd19c9f8 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -1,13 +1,13 @@  ---  - name: generate hawkular-metrics certificates -  include: setup_certificate.yaml +  include_tasks: setup_certificate.yaml    vars:      component: hawkular-metrics      hostnames: "hawkular-metrics,hawkular-metrics.{{ openshift_metrics_project }}.svc.cluster.local,{{ openshift_metrics_hawkular_hostname }}"    changed_when: no  - name: generate hawkular-cassandra certificates -  include: setup_certificate.yaml +  include_tasks: setup_certificate.yaml    vars:      component: hawkular-cassandra      hostnames: hawkular-cassandra diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml index 0eb852d91..a33b28ba7 100644 --- a/roles/openshift_metrics/tasks/install_heapster.yaml +++ b/roles/openshift_metrics/tasks/install_heapster.yaml @@ -66,4 +66,4 @@          namespace: "{{ openshift_metrics_project }}"    changed_when: no -- include: generate_heapster_secrets.yaml +- include_tasks: generate_heapster_secrets.yaml diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index fdf4ae57f..49d1d8cf1 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -1,8 +1,8 @@  --- -- include: pre_install.yaml +- include_tasks: pre_install.yaml  - name: Install Metrics -  include: "{{ role_path }}/tasks/install_{{ include_file }}.yaml" +  include_tasks: "install_{{ include_file }}.yaml"    with_items:      - support      - heapster @@ -13,11 +13,11 @@    when: not openshift_metrics_heapster_standalone | bool  - name: Install Heapster Standalone -  include: 
install_heapster.yaml +  include_tasks: install_heapster.yaml    when: openshift_metrics_heapster_standalone | bool  - name: Install Hawkular OpenShift Agent (HOSA) -  include: install_hosa.yaml +  include_tasks: install_hosa.yaml    when: openshift_metrics_install_hawkular_agent | default(false) | bool  - find: @@ -34,7 +34,7 @@    changed_when: no  - name: Create objects -  include: oc_apply.yaml +  include_tasks: oc_apply.yaml    vars:      kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"      namespace: "{{ openshift_metrics_project }}" @@ -58,7 +58,7 @@    changed_when: no  - name: Create Hawkular Agent objects -  include: oc_apply.yaml +  include_tasks: oc_apply.yaml    vars:      kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"      namespace: "{{ openshift_metrics_hawkular_agent_namespace }}" @@ -67,7 +67,7 @@    with_items: "{{ hawkular_agent_object_defs.results }}"    when: openshift_metrics_install_hawkular_agent | bool -- include: update_master_config.yaml +- include_tasks: update_master_config.yaml  - command: >      {{openshift.common.client_binary}} @@ -80,11 +80,11 @@    changed_when: no  - name: Scaling down cluster to recognize changes -  include: stop_metrics.yaml +  include_tasks: stop_metrics.yaml    when: existing_metrics_rc.stdout_lines | length > 0  - name: Scaling up cluster -  include: start_metrics.yaml +  include_tasks: start_metrics.yaml    tags: openshift_metrics_start_cluster    when:      - openshift_metrics_start_cluster | default(true) | bool diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml index 584e3be05..c3727d530 100644 --- a/roles/openshift_metrics/tasks/install_support.yaml +++ b/roles/openshift_metrics/tasks/install_support.yaml @@ -19,7 +19,7 @@  - fail: msg="'keytool' is unavailable. 
Please install java-1.8.0-openjdk-headless on the control node"    when: keytool_check.rc  == 1 -- include: generate_certificates.yaml -- include: generate_serviceaccounts.yaml -- include: generate_services.yaml -- include: generate_rolebindings.yaml +- include_tasks: generate_certificates.yaml +- include_tasks: generate_serviceaccounts.yaml +- include_tasks: generate_services.yaml +- include_tasks: generate_rolebindings.yaml diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 10509fc1e..9dfe360bb 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -43,15 +43,15 @@    check_mode: no    tags: metrics_init -- include: install_metrics.yaml +- include_tasks: install_metrics.yaml    when:      - openshift_metrics_install_metrics | bool -- include: uninstall_metrics.yaml +- include_tasks: uninstall_metrics.yaml    when:      - not openshift_metrics_install_metrics | bool -- include: uninstall_hosa.yaml +- include_tasks: uninstall_hosa.yaml    when: not openshift_metrics_install_hawkular_agent | bool  - name: Delete temp directory diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 403b1252c..1265c7bfd 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -1,6 +1,6 @@  ---  - name: stop metrics -  include: stop_metrics.yaml +  include_tasks: stop_metrics.yaml  - name: remove metrics components    command: > diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 index 6a3811598..11476bf75 100644 --- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 @@ -24,7 +24,7 @@ spec:          - {{openshift_metrics_cassandra_storage_group}}  {% if node_selector is iterable and node_selector 
| length > 0 %}        nodeSelector: -{% for key, value in node_selector.iteritems() %} +{% for key, value in node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}  {% endif %} diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 index 0662bea53..e976bc222 100644 --- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 @@ -19,7 +19,7 @@ spec:        serviceAccount: hawkular  {% if node_selector is iterable and node_selector | length > 0 %}        nodeSelector: -{% for key, value in node_selector.iteritems() %} +{% for key, value in node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}  {% endif %} diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 index 40d09e9fa..04e2b2937 100644 --- a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 +++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 @@ -19,7 +19,7 @@ spec:        serviceAccount: hawkular-openshift-agent  {% if node_selector is iterable and node_selector | length > 0 %}        nodeSelector: -{% for key, value in node_selector.iteritems() %} +{% for key, value in node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}  {% endif %} diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2 index e732c1eee..0d4dd0e2b 100644 --- a/roles/openshift_metrics/templates/heapster.j2 +++ b/roles/openshift_metrics/templates/heapster.j2 @@ -20,7 +20,7 @@ spec:        serviceAccountName: heapster  {% if node_selector is iterable and node_selector | length > 0 %}        nodeSelector: -{% for key, value in node_selector.iteritems() %} +{% for key, value in node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}  {% endif %} diff --git 
a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2 index b4e6a1503..9a4b428ec 100644 --- a/roles/openshift_metrics/templates/pvc.j2 +++ b/roles/openshift_metrics/templates/pvc.j2 @@ -7,13 +7,13 @@ metadata:      metrics-infra: support  {% elif labels %}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{ key }}: {{ value }}  {% endfor %}  {% endif %}  {% if annotations is defined and annotations %}    annotations: -{% for key,value in annotations.iteritems() %} +{% for key,value in annotations.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} @@ -21,7 +21,7 @@ spec:  {% if pv_selector is defined and pv_selector is mapping %}    selector:      matchLabels: -{% for key,value in pv_selector.iteritems() %} +{% for key,value in pv_selector.items() %}        {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_metrics/templates/rolebinding.j2 b/roles/openshift_metrics/templates/rolebinding.j2 index 5230f0780..a9a24c157 100644 --- a/roles/openshift_metrics/templates/rolebinding.j2 +++ b/roles/openshift_metrics/templates/rolebinding.j2 @@ -4,7 +4,7 @@ metadata:    name: {{obj_name}}  {% if labels is defined %}    labels: -{% for k, v in labels.iteritems() %} +{% for k, v in labels.items() %}      {{ k }}: {{ v }}  {% endfor %}  {% endif %} diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2 index 253d6ecf5..9d628b666 100644 --- a/roles/openshift_metrics/templates/route.j2 +++ b/roles/openshift_metrics/templates/route.j2 @@ -7,7 +7,7 @@ metadata:  {% endif %}  {% if labels is defined and labels %}    labels: -{% for k, v in labels.iteritems() %} +{% for k, v in labels.items() %}      {{ k }}: {{ v }}  {% endfor %}  {% endif %} diff --git a/roles/openshift_metrics/templates/secret.j2 b/roles/openshift_metrics/templates/secret.j2 index 5b9dba122..b788be04e 100644 --- 
a/roles/openshift_metrics/templates/secret.j2 +++ b/roles/openshift_metrics/templates/secret.j2 @@ -4,15 +4,15 @@ metadata:    name: "{{ name }}"  {% if annotations is defined%}    annotations: -{% for key, value in annotations.iteritems() %} +{% for key, value in annotations.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %}    labels: -{% for k, v in labels.iteritems() %} +{% for k, v in labels.items() %}      {{ k }}: {{ v }}  {% endfor %}  data: -{% for k, v in data.iteritems() %} +{% for k, v in data.items() %}    {{ k }}: {{ v }}  {% endfor %} diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2 index ce0bc2eec..4d23982f1 100644 --- a/roles/openshift_metrics/templates/service.j2 +++ b/roles/openshift_metrics/templates/service.j2 @@ -4,13 +4,13 @@ metadata:    name: "{{obj_name}}"  {% if annotations is defined%}    annotations: -{% for key, value in annotations.iteritems() %} +{% for key, value in annotations.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %}  {% if labels is defined%}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} @@ -22,7 +22,7 @@ spec:    ports:  {% for port in ports %}    - -{% for key, value in port.iteritems() %} +{% for key, value in port.items() %}      {{key}}: {{value}}  {% endfor %}  {% if port.targetPort is undefined %} @@ -33,6 +33,6 @@ spec:      targetPort: {{service_targetPort}}  {% endif %}    selector: -  {% for key, value in selector.iteritems() %} +  {% for key, value in selector.items() %}    {{key}}: {{value}}    {% endfor %} diff --git a/roles/openshift_metrics/templates/serviceaccount.j2 b/roles/openshift_metrics/templates/serviceaccount.j2 index b22acc594..ea19f17d7 100644 --- a/roles/openshift_metrics/templates/serviceaccount.j2 +++ b/roles/openshift_metrics/templates/serviceaccount.j2 @@ -4,7 +4,7 @@ metadata:    name: {{obj_name}}  {% if labels 
is defined%}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 89d154ad7..5a0c09f5c 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,12 +1,28 @@  ---  openshift_node_debug_level: "{{ debug_level | default(2) }}" +openshift_node_dnsmasq_install_network_manager_hook: true + +# lo must always be present in this list or dnsmasq will conflict with +# the node's dns service. +openshift_node_dnsmasq_except_interfaces: +- lo +  r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"  r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" +  openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"  openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}" +system_images_registry_dict: +  openshift-enterprise: "registry.access.redhat.com" +  origin: "docker.io" + +system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}" +l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" +  openshift_image_tag: ''  default_r_openshift_node_image_prep_packages: diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node/files/networkmanager/99-origin-dns.sh index f4e48b5b7..f4e48b5b7 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node/files/networkmanager/99-origin-dns.sh diff --git a/roles/openshift_node/handlers/main.yml 
b/roles/openshift_node/handlers/main.yml index b102c1b18..229c6bbed 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -1,4 +1,15 @@  --- +- name: restart NetworkManager +  systemd: +    name: NetworkManager +    state: restarted +    enabled: True + +- name: restart dnsmasq +  systemd: +    name: dnsmasq +    state: restarted +  - name: restart openvswitch    systemd:      name: openvswitch diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index c32aa1600..927d107c6 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -17,11 +17,8 @@ dependencies:  - role: lib_openshift  - role: lib_os_firewall    when: not (openshift_node_upgrade_in_progress | default(False)) -- role: openshift_clock -  when: not (openshift_node_upgrade_in_progress | default(False))  - role: openshift_docker  - role: openshift_cloud_provider    when: not (openshift_node_upgrade_in_progress | default(False)) -- role: openshift_node_dnsmasq  - role: lib_utils    when: openshift_node_upgrade_in_progress | default(False) diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index b8be50f6c..ac43ef039 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -32,8 +32,7 @@      regexp: "^CONFIG_FILE=.*"  - name: include aws sysconfig credentials -  include: aws.yml -  static: yes +  import_tasks: aws.yml    when: not (openshift_node_use_instance_profiles | default(False))  #- name: update the ExecStart to have bootstrap diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 2fea33454..741a2234f 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -1,6 +1,6 @@  ---  - name: Install the systemd units -  include: systemd_units.yml +  include_tasks: systemd_units.yml  - name: Start and enable openvswitch service 
   systemd: @@ -47,8 +47,7 @@      - restart node  - name: include aws provider credentials -  include: aws.yml -  static: yes +  import_tasks: aws.yml    when: not (openshift_node_use_instance_profiles | default(False))  # Necessary because when you're on a node that's also a master the master will be diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node/tasks/dnsmasq.yml index 9bbaafc29..22bdce6c6 100644 --- a/roles/openshift_node_dnsmasq/tasks/main.yml +++ b/roles/openshift_node/tasks/dnsmasq.yml @@ -59,9 +59,9 @@      state: started  # Dynamic NetworkManager based dispatcher -- include: ./network-manager.yml +- include_tasks: dnsmasq/network-manager.yml    when: network_manager_active | bool  # Relies on ansible in order to configure static config -- include: ./no-network-manager.yml +- include_tasks: dnsmasq/no-network-manager.yml    when: not network_manager_active | bool diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node/tasks/dnsmasq/network-manager.yml index e5a92a630..e5a92a630 100644 --- a/roles/openshift_node_dnsmasq/tasks/network-manager.yml +++ b/roles/openshift_node/tasks/dnsmasq/network-manager.yml diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml index 8a7da66c2..dede2fb8f 100644 --- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml +++ b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml @@ -8,4 +8,4 @@      state: present    notify: restart NetworkManager -- include: ./network-manager.yml +- include_tasks: network-manager.yml diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 6b7e40491..9a91e2fb6 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -20,7 +20,7 @@  - when:    - openshift.common.is_containerized | bool -  - not openshift.common.is_node_system_container | bool +  - not 
l_is_node_system_container | bool    block:    - name: Pre-pull node image when containerized      command: > diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index eae9ca7bc..d46b1f9c3 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -6,9 +6,10 @@      - deployment_type == 'openshift-enterprise'      - not openshift_use_crio | default(false) +- include: dnsmasq.yml +  - name: setup firewall -  include: firewall.yml -  static: yes +  import_tasks: firewall.yml  #### Disable SWAP #####  # https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory @@ -41,7 +42,7 @@  #### End Disable Swap Block ####  - name: include node installer -  include: install.yml +  include_tasks: install.yml  - name: Restart cri-o    systemd: @@ -66,34 +67,34 @@      sysctl_file: "/etc/sysctl.d/99-openshift.conf"      reload: yes -- include: registry_auth.yml +- include_tasks: registry_auth.yml  - name: include standard node config -  include: config.yml +  include_tasks: config.yml  #### Storage class plugins here ####  - name: NFS storage plugin configuration -  include: storage_plugins/nfs.yml +  include_tasks: storage_plugins/nfs.yml    tags:      - nfs  - name: GlusterFS storage plugin configuration -  include: storage_plugins/glusterfs.yml +  include_tasks: storage_plugins/glusterfs.yml    when: "'glusterfs' in openshift.node.storage_plugin_deps"  - name: Ceph storage plugin configuration -  include: storage_plugins/ceph.yml +  include_tasks: storage_plugins/ceph.yml    when: "'ceph' in openshift.node.storage_plugin_deps"  - name: iSCSI storage plugin configuration -  include: storage_plugins/iscsi.yml +  include_tasks: storage_plugins/iscsi.yml    when: "'iscsi' in openshift.node.storage_plugin_deps"  ##### END Storage ##### -- include: config/workaround-bz1331590-ovs-oom-fix.yml +- include_tasks: config/workaround-bz1331590-ovs-oom-fix.yml    when: 
openshift_node_use_openshift_sdn | default(true) | bool  - name: include bootstrap node config -  include: bootstrap.yml +  include_tasks: bootstrap.yml    when: openshift_node_bootstrap diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 164a79b39..73dc9e130 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -6,14 +6,14 @@  - name: Pre-pull node system container image    command: > -    atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }} +    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Pulling layer' in pull_result.stdout"  - name: Install or Update node system container    oc_atomic_container:      name: "{{ openshift.common.service_type }}-node" -    image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"      values:      - "DNS_DOMAIN={{ openshift.common.dns_domain }}"      - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service" diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index 0f73ce454..8c3548475 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -17,14 +17,14 @@  - name: Pre-pull 
OpenVSwitch system container image    command: > -    atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }} +    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Pulling layer' in pull_result.stdout"  - name: Install or Update OpenVSwitch system container    oc_atomic_container:      name: openvswitch -    image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"      state: latest      values:        - "DOCKER_SERVICE={{ l_service_name }}" diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 9c182ade6..397e1ba18 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -3,7 +3,7 @@    template:      dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"      src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" -  when: not openshift.common.is_node_system_container | bool +  when: not l_is_node_system_container | bool    notify:    - reload systemd units    - restart node @@ -11,21 +11,21 @@  - when: openshift.common.is_containerized | bool    block:    - name: include node deps docker service file -    include: config/install-node-deps-docker-service-file.yml +    include_tasks: config/install-node-deps-docker-service-file.yml    - name: include 
ovs service environment file -    include: config/install-ovs-service-env-file.yml +    include_tasks: config/install-ovs-service-env-file.yml    - name: Install Node system container -    include: node_system_container.yml +    include_tasks: node_system_container.yml      when: -    - openshift.common.is_node_system_container | bool +    - l_is_node_system_container | bool    - name: Install OpenvSwitch system containers -    include: openvswitch_system_container.yml +    include_tasks: openvswitch_system_container.yml      when:      - openshift_node_use_openshift_sdn | bool -    - openshift.common.is_openvswitch_system_container | bool +    - l_is_openvswitch_system_container | bool  - block:    - name: Pre-pull openvswitch image @@ -34,11 +34,11 @@      register: pull_result      changed_when: "'Downloaded newer image' in pull_result.stdout" -  - include: config/install-ovs-docker-service-file.yml +  - include_tasks: config/install-ovs-docker-service-file.yml    when:    - openshift.common.is_containerized | bool    - openshift_node_use_openshift_sdn | bool -  - not openshift.common.is_openvswitch_system_container | bool +  - not l_is_openvswitch_system_container | bool -- include: config/configure-node-settings.yml -- include: config/configure-proxy-settings.yml +- include_tasks: config/configure-node-settings.yml +- include_tasks: config/configure-proxy-settings.yml diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml index 2bca1e974..ff3478800 100644 --- a/roles/openshift_node/tasks/upgrade.yml +++ b/roles/openshift_node/tasks/upgrade.yml @@ -10,7 +10,7 @@  # tasks file for openshift_node_upgrade -- include: registry_auth.yml +- include_tasks: registry_auth.yml  - name: Stop node and openvswitch services    service: @@ -48,7 +48,7 @@    - openshift.common.is_containerized | bool    - openshift_use_openshift_sdn | bool -- include: docker/upgrade.yml +- include_tasks: docker/upgrade.yml    vars:      # We will restart 
Docker ourselves after everything is ready:      skip_docker_restart: True @@ -56,10 +56,10 @@    - l_docker_upgrade is defined    - l_docker_upgrade | bool -- include: "{{ node_config_hook }}" +- include_tasks: "{{ node_config_hook }}"    when: node_config_hook is defined -- include: upgrade/rpm_upgrade.yml +- include_tasks: upgrade/rpm_upgrade.yml    vars:      component: "node"      openshift_version: "{{ openshift_pkg_version | default('') }}" @@ -70,7 +70,7 @@      path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"      state: absent -- include: upgrade/containerized_node_upgrade.yml +- include_tasks: upgrade/containerized_node_upgrade.yml    when: openshift.common.is_containerized | bool  - name: Ensure containerized services stopped before Docker restart @@ -165,7 +165,7 @@      value: "/etc/origin/node/resolv.conf"  # Restart all services -- include: upgrade/restart.yml +- include_tasks: upgrade/restart.yml  - name: Wait for node to be ready    oc_obj: @@ -179,5 +179,4 @@    retries: 24    delay: 5 -- include_role: -    name: openshift_node_dnsmasq +- include_tasks: dnsmasq.yml diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml index 96b94d8b6..245de60a7 100644 --- a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml +++ b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml @@ -6,7 +6,7 @@      skip_node_svc_handlers: True  - name: Update systemd units -  include: ../systemd_units.yml +  include_tasks: ../systemd_units.yml  # This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of  # play when the node has already been marked schedulable again. 
(this would look strange diff --git a/roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j2 b/roles/openshift_node/templates/node-dnsmasq.conf.j2 index 3caa3bd4a..3caa3bd4a 100644 --- a/roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j2 +++ b/roles/openshift_node/templates/node-dnsmasq.conf.j2 diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node/templates/origin-dns.conf.j2 index 6543c7c3e..6543c7c3e 100644 --- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 +++ b/roles/openshift_node/templates/origin-dns.conf.j2 diff --git a/roles/openshift_node_dnsmasq/README.md b/roles/openshift_node_dnsmasq/README.md deleted file mode 100644 index 4596190d7..000000000 --- a/roles/openshift_node_dnsmasq/README.md +++ /dev/null @@ -1,27 +0,0 @@ -OpenShift Node DNS resolver -=========================== - -Configure dnsmasq to act as a DNS resolver for an OpenShift node. - -Requirements ------------- - -Role Variables --------------- - -From this role: - -| Name                                                | Default value | Description                                                                       | -|-----------------------------------------------------|---------------|-----------------------------------------------------------------------------------| -| openshift_node_dnsmasq_install_network_manager_hook | true          | Install NetworkManager hook updating /etc/resolv.conf with local dnsmasq instance | - -Dependencies ------------- - -* openshift_common -* openshift_node_facts - -License -------- - -Apache License Version 2.0 diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml deleted file mode 100644 index ebcff46b5..000000000 --- a/roles/openshift_node_dnsmasq/defaults/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -openshift_node_dnsmasq_install_network_manager_hook: true - -# lo must always be present in this list or dnsmasq will conflict with 
-# the node's dns service. -openshift_node_dnsmasq_except_interfaces: -- lo diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml deleted file mode 100644 index 9f98126a0..000000000 --- a/roles/openshift_node_dnsmasq/handlers/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: restart NetworkManager -  systemd: -    name: NetworkManager -    state: restarted -    enabled: True - -- name: restart dnsmasq -  systemd: -    name: dnsmasq -    state: restarted diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml deleted file mode 100644 index d80ed1b72..000000000 --- a/roles/openshift_node_dnsmasq/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: -  author: Scott Dodson -  description: OpenShift Node DNSMasq support -  company: Red Hat, Inc. -  license: Apache License, Version 2.0 -  min_ansible_version: 2.2 -  platforms: -  - name: EL -    versions: -    - 7 -  categories: -  - cloud -dependencies: -- role: openshift_node_facts diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml index b45130400..d33d09980 100644 --- a/roles/openshift_node_facts/tasks/main.yml +++ b/roles/openshift_node_facts/tasks/main.yml @@ -15,7 +15,6 @@        kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"        labels: "{{ openshift_node_labels | default(None) }}"        registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" -      schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"        storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"        set_node_ip: "{{ openshift_set_node_ip | default(None) }}" diff --git a/roles/openshift_node_group/tasks/main.yml b/roles/openshift_node_group/tasks/main.yml index c7c15683d..43ecf1b8b 100644 --- 
a/roles/openshift_node_group/tasks/main.yml +++ b/roles/openshift_node_group/tasks/main.yml @@ -1,6 +1,6 @@  ---  - name: Build node config maps -  include: create_config.yml +  include_tasks: create_config.yml    vars:      openshift_node_group_name: "{{ node_group.name }}"      openshift_node_group_edits: "{{ node_group.edits | default([]) }}" diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index bfa65b460..0e7538629 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -724,7 +724,7 @@ resources:            type:        node            subtype:     app            node_labels: -{% for k, v in openshift_openstack_cluster_node_labels.app.iteritems() %} +{% for k, v in openshift_openstack_cluster_node_labels.app.items() %}              {{ k|e }}: {{ v|e }}  {% endfor %}            image:       {{ openshift_openstack_node_image }} @@ -788,7 +788,7 @@ resources:            type:        node            subtype:     infra            node_labels: -{% for k, v in openshift_openstack_cluster_node_labels.infra.iteritems() %} +{% for k, v in openshift_openstack_cluster_node_labels.infra.items() %}              {{ k|e }}: {{ v|e }}  {% endfor %}            image:       {{ openshift_openstack_infra_image }} diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 index 9c5103597..ee9dac7cb 100644 --- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 +++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 @@ -9,7 +9,7 @@ items:      name: "{{ volume.name }}"  {% if volume.labels is defined and volume.labels is mapping %}      labels: -{% for key,value in volume.labels.iteritems() %} +{% for key,value in volume.labels.items() %}        {{ key }}: {{ value }}  {% endfor %}  {% endif %} diff 
--git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md index 92f74928c..f1eca1da6 100644 --- a/roles/openshift_prometheus/README.md +++ b/roles/openshift_prometheus/README.md @@ -23,6 +23,17 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).  - `openshift_prometheus_<COMPONENT>_image_version`: specify image version for the component  +- `openshift_prometheus_args`: Modify or add arguments for prometheus application + +- `openshift_prometheus_hostname`: specify the hostname for the route to prometheus `prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}` + +- `openshift_prometheus_alerts_hostname`: specify the hostname for the route to prometheus-alerts `prometheus_alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}` + +e.g +``` +openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=5s', '--storage.tsdb.max-block-duration=6m'] +``` +  ## PVC related variables  Each prometheus component (prometheus, alertmanager, alertbuffer) can set pv claim by setting corresponding role variable:  ``` diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index 4e2cea0b9..df331a4bb 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -4,11 +4,18 @@ openshift_prometheus_state: present  openshift_prometheus_namespace: openshift-metrics +# defaults hosts for routes +openshift_prometheus_hostname: prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} +openshift_prometheus_alerts_hostname: alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} +  openshift_prometheus_node_selector: {"region":"infra"}  # additional prometheus rules file  openshift_prometheus_additional_rules_file: null +#prometheus application arguments +openshift_prometheus_args: 
['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=2m'] +  # storage  # One of ['emptydir', 'pvc']  openshift_prometheus_storage_type: "emptydir" diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index 21da4bc9d..ad15dc65f 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -3,7 +3,7 @@  # namespace  - name: Add prometheus project    oc_project: -    state: "{{ state }}" +    state: present      name: "{{ openshift_prometheus_namespace }}"      node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"      description: Prometheus @@ -11,7 +11,7 @@  # secrets  - name: Set alert and prometheus secrets    oc_secret: -    state: "{{ state }}" +    state: present      name: "{{ item }}-proxy"      namespace: "{{ openshift_prometheus_namespace }}"      contents: @@ -24,7 +24,7 @@  # serviceaccount  - name: create prometheus serviceaccount    oc_serviceaccount: -    state: "{{ state }}" +    state: present      name: prometheus      namespace: "{{ openshift_prometheus_namespace }}"      #    TODO add annotations when supproted @@ -48,7 +48,7 @@  # create clusterrolebinding for prometheus serviceaccount  - name: Set cluster-reader permissions for prometheus    oc_adm_policy_user: -    state: "{{ state }}" +    state: present      namespace: "{{ openshift_prometheus_namespace }}"      resource_kind: cluster-role      resource_name: cluster-reader @@ -58,7 +58,7 @@  # TODO join into 1 task with loop  - name: Create prometheus service    oc_service: -    state: "{{ state }}" +    state: present      name: "{{ item.name }}"      namespace: "{{ openshift_prometheus_namespace }}"      selector: @@ -76,7 +76,7 @@  - name: Create alerts service    oc_service: -    state: "{{ state }}" +    state: present      name: "{{ item.name }}"      namespace: "{{ 
openshift_prometheus_namespace }}"      selector: @@ -111,14 +111,17 @@  # create prometheus and alerts routes  - name: create prometheus and alerts routes    oc_route: -    state: "{{ state }}" +    state: present      name: "{{ item.name }}" +    host: "{{ item.host }}"      namespace: "{{ openshift_prometheus_namespace }}"      service_name: "{{ item.name }}"      tls_termination: reencrypt    with_items:      - name: prometheus +      host: "{{ openshift_prometheus_hostname }}"      - name: alerts +      host: "{{ openshift_prometheus_alerts_hostname }}"  # Storage  - name: create prometheus pvc @@ -185,7 +188,7 @@  # In prometheus configmap create "additional.rules" section if file exists  - name: Set prometheus configmap    oc_configmap: -    state: "{{ state }}" +    state: present      name: "prometheus"      namespace: "{{ openshift_prometheus_namespace }}"      from_file: @@ -196,7 +199,7 @@  - name: Set prometheus configmap    oc_configmap: -    state: "{{ state }}" +    state: present      name: "prometheus"      namespace: "{{ openshift_prometheus_namespace }}"      from_file: @@ -212,7 +215,7 @@  - name: Set alertmanager configmap    oc_configmap: -    state: "{{ state }}" +    state: present      name: "prometheus-alerts"      namespace: "{{ openshift_prometheus_namespace }}"      from_file: @@ -229,7 +232,7 @@  - name: Set prometheus stateful set    oc_obj: -    state: "{{ state }}" +    state: present      name: "prometheus"      namespace: "{{ openshift_prometheus_namespace }}"      kind: statefulset diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index 5cc9a67eb..38798e1f5 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -20,9 +20,11 @@      mode: 0755    changed_when: False -- include: install_prometheus.yaml -  vars: -    state: "{{ openshift_prometheus_state }}" +- include_tasks: install_prometheus.yaml +  when: 
openshift_prometheus_state == 'present' + +- include_tasks: uninstall_prometheus.yaml +  when: openshift_prometheus_state == 'absent'  - name: Delete temp directory    file: diff --git a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml new file mode 100644 index 000000000..d746402db --- /dev/null +++ b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml @@ -0,0 +1,7 @@ +--- + +# remove namespace - This will delete all the objects inside the namespace +- name: Remove prometheus project +  oc_project: +    state: absent +    name: "{{ openshift_prometheus_namespace }}" diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2 index 456db3a57..d780550b8 100644 --- a/roles/openshift_prometheus/templates/prometheus.j2 +++ b/roles/openshift_prometheus/templates/prometheus.j2 @@ -22,7 +22,7 @@ spec:        serviceAccountName: prometheus  {% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}        nodeSelector: -{% for key, value in openshift_prometheus_node_selector.iteritems() %} +{% for key, value in openshift_prometheus_node_selector.items() %}          {{ key }}: "{{ value }}"  {% endfor %}  {% endif %} @@ -75,8 +75,9 @@ spec:        - name: prometheus          args: -        - --storage.tsdb.retention=6h -        - --storage.tsdb.min-block-duration=2m +{% for arg in openshift_prometheus_args %} +        - {{ arg }} +{% endfor %}          - --config.file=/etc/prometheus/prometheus.yml          - --web.listen-address=localhost:9090          image: "{{ l_openshift_prometheus_image_prefix }}prometheus:{{ l_openshift_prometheus_image_version }}" diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml index ad52a3125..31f6c1bb1 100644 --- a/roles/openshift_prometheus/vars/default_images.yml +++ 
b/roles/openshift_prometheus/vars/default_images.yml @@ -6,7 +6,7 @@ l_openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_alter  l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(l_openshift_prometheus_image_prefix) }}"  # image version defaults -l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0-dev.3') }}" +l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0') }}"  l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v1.0.0') }}"  l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.9.1') }}"  l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v0.0.2') }}" diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml index 324fdcc82..2d1217c74 100644 --- a/roles/openshift_provisioners/tasks/install_provisioners.yaml +++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml @@ -16,10 +16,10 @@    when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined  - name: Install support -  include: install_support.yaml +  include_tasks: install_support.yaml  - name: Install EFS -  include: install_efs.yaml +  include_tasks: install_efs.yaml    when: openshift_provisioners_efs | bool  - find: paths={{ mktemp.stdout }}/templates patterns=*.yaml @@ -32,7 +32,7 @@    changed_when: no  - name: Create objects -  include: oc_apply.yaml +  include_tasks: oc_apply.yaml    vars:      - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"      - namespace: "{{ openshift_provisioners_project }}" @@ -51,5 +51,5 @@    when: ansible_check_mode  - name: Scaling up cluster -  include: start_cluster.yaml +  include_tasks: 
start_cluster.yaml    when: start_cluster | default(true) | bool diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml index d6db81ab9..93c4c394d 100644 --- a/roles/openshift_provisioners/tasks/install_support.yaml +++ b/roles/openshift_provisioners/tasks/install_support.yaml @@ -10,8 +10,8 @@    changed_when: False    check_mode: no -- include: generate_secrets.yaml +- include_tasks: generate_secrets.yaml -- include: generate_clusterrolebindings.yaml +- include_tasks: generate_clusterrolebindings.yaml -- include: generate_serviceaccounts.yaml +- include_tasks: generate_serviceaccounts.yaml diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml index a50c78c97..4ba26b2b8 100644 --- a/roles/openshift_provisioners/tasks/main.yaml +++ b/roles/openshift_provisioners/tasks/main.yaml @@ -12,10 +12,10 @@    check_mode: no    tags: provisioners_init -- include: "{{ role_path }}/tasks/install_provisioners.yaml" +- include_tasks: install_provisioners.yaml    when: openshift_provisioners_install_provisioners | default(false) | bool -- include: "{{ role_path }}/tasks/uninstall_provisioners.yaml" +- include_tasks: uninstall_provisioners.yaml    when: not openshift_provisioners_install_provisioners | default(false) | bool  - name: Delete temp directory diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml index 0be4bc7d2..602dee773 100644 --- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml +++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml @@ -1,6 +1,6 @@  ---  - name: stop provisioners -  include: stop_cluster.yaml +  include_tasks: stop_cluster.yaml  # delete the deployment objects that we had created  - name: delete provisioner api objects diff --git a/roles/openshift_provisioners/templates/clusterrolebinding.j2 
b/roles/openshift_provisioners/templates/clusterrolebinding.j2 index 994afa32d..1f26c93a4 100644 --- a/roles/openshift_provisioners/templates/clusterrolebinding.j2 +++ b/roles/openshift_provisioners/templates/clusterrolebinding.j2 @@ -4,7 +4,7 @@ metadata:    name: {{obj_name}}  {% if labels is defined%}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_provisioners/templates/efs.j2 b/roles/openshift_provisioners/templates/efs.j2 index 81b9ccca5..37fd02977 100644 --- a/roles/openshift_provisioners/templates/efs.j2 +++ b/roles/openshift_provisioners/templates/efs.j2 @@ -22,7 +22,7 @@ spec:        serviceAccountName: "{{deploy_serviceAccount}}"  {% if node_selector is iterable and node_selector | length > 0 %}        nodeSelector: -{% for key, value in node_selector.iteritems() %} +{% for key, value in node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}  {% endif %} diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2 index f81b1617a..b648cd15e 100644 --- a/roles/openshift_provisioners/templates/pv.j2 +++ b/roles/openshift_provisioners/templates/pv.j2 @@ -4,13 +4,13 @@ metadata:    name: {{obj_name}}  {% if annotations is defined %}    annotations: -{% for key,value in annotations.iteritems() %} +{% for key,value in annotations.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %}  {% if labels is defined%}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2 index 0dd8772eb..0a88b7c88 100644 --- a/roles/openshift_provisioners/templates/pvc.j2 +++ b/roles/openshift_provisioners/templates/pvc.j2 @@ -4,7 +4,7 @@ metadata:    name: {{obj_name}}  {% if annotations is defined 
%}    annotations: -{% for key,value in annotations.iteritems() %} +{% for key,value in annotations.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} @@ -12,7 +12,7 @@ spec:  {% if pv_selector is defined and pv_selector is mapping %}    selector:      matchLabels: -{% for key,value in pv_selector.iteritems() %} +{% for key,value in pv_selector.items() %}        {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_provisioners/templates/secret.j2 b/roles/openshift_provisioners/templates/secret.j2 index 78824095b..2fbb28829 100644 --- a/roles/openshift_provisioners/templates/secret.j2 +++ b/roles/openshift_provisioners/templates/secret.j2 @@ -4,7 +4,7 @@ metadata:    name: {{obj_name}}  {% if labels is defined%}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_provisioners/templates/serviceaccount.j2 b/roles/openshift_provisioners/templates/serviceaccount.j2 index b22acc594..ea19f17d7 100644 --- a/roles/openshift_provisioners/templates/serviceaccount.j2 +++ b/roles/openshift_provisioners/templates/serviceaccount.j2 @@ -4,7 +4,7 @@ metadata:    name: {{obj_name}}  {% if labels is defined%}    labels: -{% for key, value in labels.iteritems() %} +{% for key, value in labels.items() %}      {{key}}: {{value}}  {% endfor %}  {% endif %} diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 95ba9fe4c..552a22a0f 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -35,7 +35,7 @@    - when: r_openshift_repos_has_run is not defined      block: -    - include: centos_repos.yml +    - include_tasks: centos_repos.yml        when:        - ansible_os_family == "RedHat"        - ansible_distribution != "Fedora" diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml 
b/roles/openshift_sanitize_inventory/tasks/deprecations.yml index 94d3acffc..795b8ee60 100644 --- a/roles/openshift_sanitize_inventory/tasks/deprecations.yml +++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml @@ -16,6 +16,6 @@  # for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory  - name: Assign deprecated variables to correct counterparts -  include: "{{ item }}" +  include_tasks: "{{ item }}"    with_fileglob:    - "../tasks/__deprecations_*.yml" diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 70b236033..77428272c 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -1,7 +1,7 @@  ---  # We should print out deprecations prior to any failures so that if a play does fail for other reasons  # the user would also be aware of any deprecated variables they should note to adjust -- include: deprecations.yml +- include_tasks: deprecations.yml  - name: Abort when conflicting deployment type variables are set    when: @@ -53,7 +53,7 @@        openshift_release is "{{ openshift_release }}" which is not a valid version string.        Please set it to a version string like "3.4". 
-- include: unsupported.yml +- include_tasks: unsupported.yml    when:      - not openshift_enable_unsupported_configurations | default(false) | bool diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 3507330e3..41a6691c9 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -40,7 +40,7 @@        command: >          {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog -- include: generate_certs.yml +- include_tasks: generate_certs.yml  - copy:      src: kubeservicecatalog_roles_bindings.yml @@ -252,7 +252,7 @@      session_affinity: None      service_type: ClusterIP -- include: start_api_server.yml +- include_tasks: start_api_server.yml  - name: Delete temp directory    file: diff --git a/roles/openshift_service_catalog/tasks/main.yml b/roles/openshift_service_catalog/tasks/main.yml index dc0d6a370..ffdbe2b11 100644 --- a/roles/openshift_service_catalog/tasks/main.yml +++ b/roles/openshift_service_catalog/tasks/main.yml @@ -1,8 +1,8 @@  ---  # do any asserts here -- include: install.yml +- include_tasks: install.yml    when: not openshift_service_catalog_remove | default(false) | bool -- include: remove.yml +- include_tasks: remove.yml    when: openshift_service_catalog_remove | default(false) | bool diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 index 0e5bb7230..4f51b8c3c 100644 --- a/roles/openshift_service_catalog/templates/api_server.j2 +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -19,7 +19,7 @@ spec:      spec:        serviceAccountName: service-catalog-apiserver        nodeSelector: -{% for key, value in node_selector.iteritems() %} +{% for key, value in node_selector.items() %}            {{key}}: "{{value}}"  {% endfor %}        
containers: diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2 index e5e5f6b50..137222f04 100644 --- a/roles/openshift_service_catalog/templates/controller_manager.j2 +++ b/roles/openshift_service_catalog/templates/controller_manager.j2 @@ -19,7 +19,7 @@ spec:      spec:        serviceAccountName: service-catalog-controller        nodeSelector: -{% for key, value in node_selector.iteritems() %} +{% for key, value in node_selector.items() %}          {{key}}: "{{value}}"  {% endfor %}        containers: diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml index 9c1409dee..63dd5cce6 100644 --- a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml +++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml @@ -83,7 +83,6 @@ objects:          containers:          - name: glusterblock-provisioner            image: ${IMAGE_NAME}:${IMAGE_VERSION} -          image: gluster/glusterblock-provisioner:latest            imagePullPolicy: IfNotPresent            env:            - name: PROVISIONER_NAME diff --git a/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml new file mode 100644 index 000000000..34af652c2 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml @@ -0,0 +1,133 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: deploy-heketi +  labels: +    glusterfs: heketi-template +    deploy-heketi: support +  annotations: +    description: Bootstrap Heketi installation +    tags: glusterfs,heketi,installation +objects: +- kind: Service +  apiVersion: v1 +  metadata: +    name: deploy-heketi-${CLUSTER_NAME} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-service +      
deploy-heketi: support +    annotations: +      description: Exposes Heketi service +  spec: +    ports: +    - name: deploy-heketi-${CLUSTER_NAME} +      port: 8080 +      targetPort: 8080 +    selector: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route +  apiVersion: v1 +  metadata: +    name: ${HEKETI_ROUTE} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-route +      deploy-heketi: support +  spec: +    to: +      kind: Service +      name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: deploy-heketi-${CLUSTER_NAME} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-dc +      deploy-heketi: support +    annotations: +      description: Defines how to deploy Heketi +  spec: +    replicas: 1 +    selector: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +    triggers: +    - type: ConfigChange +    strategy: +      type: Recreate +    template: +      metadata: +        name: deploy-heketi +        labels: +          glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +          deploy-heketi: support +      spec: +        serviceAccountName: heketi-${CLUSTER_NAME}-service-account +        containers: +        - name: heketi +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          env: +          - name: HEKETI_USER_KEY +            value: ${HEKETI_USER_KEY} +          - name: HEKETI_ADMIN_KEY +            value: ${HEKETI_ADMIN_KEY} +          - name: HEKETI_EXECUTOR +            value: ${HEKETI_EXECUTOR} +          - name: HEKETI_FSTAB +            value: ${HEKETI_FSTAB} +          - name: HEKETI_SNAPSHOT_LIMIT +            value: '14' +          - name: HEKETI_KUBE_GLUSTER_DAEMONSET +            value: '1' +          ports: +          - containerPort: 8080 +          volumeMounts: +          - name: db +            mountPath: /var/lib/heketi +          - name: config +            mountPath: /etc/heketi +          readinessProbe: +            timeoutSeconds: 3 +            
initialDelaySeconds: 3 +            httpGet: +              path: /hello +              port: 8080 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 30 +            httpGet: +              path: /hello +              port: 8080 +        volumes: +        - name: db +        - name: config +          secret: +            secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY +  displayName: Heketi User Secret +  description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY +  displayName: Heketi Administrator Secret +  description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR +  displayName: heketi executor type +  description: Set the executor type, kubernetes or ssh +  value: kubernetes +- name: HEKETI_FSTAB +  displayName: heketi fstab path +  description: Set the fstab path, file that is populated with bricks that heketi creates +  value: /var/lib/heketi/fstab +- name: HEKETI_ROUTE +  displayName: heketi route name +  description: Set the hostname for the route URL +  value: "heketi-glusterfs" +- name: IMAGE_NAME +  displayName: heketi container image name +  required: True +- name: IMAGE_VERSION +  displayName: heketi container image version +  required: True +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify this heketi service, useful for running multiple heketi instances +  value: glusterfs diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml new file mode 100644 index 000000000..064b51473 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml @@ -0,0 +1,67 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: gluster-s3-pvcs +  labels: +    glusterfs: s3-pvcs-template +    gluster-s3: pvcs-template +  
annotations: +    description: Gluster S3 service template +    tags: glusterfs,heketi,gluster-s3 +objects: +- kind: PersistentVolumeClaim +  apiVersion: v1 +  metadata: +    name: "${PVC}" +    labels: +      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage +      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc +    annotations: +      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}" +  spec: +    accessModes: +    - ReadWriteMany +    resources: +      requests: +        storage: "${PVC_SIZE}" +- kind: PersistentVolumeClaim +  apiVersion: v1 +  metadata: +    name: "${META_PVC}" +    labels: +      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage +      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc +    annotations: +      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}" +  spec: +    accessModes: +    - ReadWriteMany +    resources: +      requests: +        storage: "${META_PVC_SIZE}" +parameters: +- name: S3_ACCOUNT +  displayName: S3 Account Name +  description: S3 storage account which will provide storage on GlusterFS volumes +  required: true +- name: PVC +  displayName: Primary GlusterFS-backed PVC +  description: GlusterFS-backed PVC for object storage +  required: true +- name: PVC_SIZE +  displayName: Primary GlusterFS-backed PVC capacity +  description: Capacity for GlusterFS-backed PVC for object storage +  value: 2Gi +- name: META_PVC +  displayName: Metadata GlusterFS-backed PVC +  description: GlusterFS-backed PVC for object storage metadata +  required: true +- name: META_PVC_SIZE +  displayName: Metadata GlusterFS-backed PVC capacity +  description: Capacity for GlusterFS-backed PVC for object storage metadata +  value: 1Gi +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances +  value: storage diff --git 
a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml new file mode 100644 index 000000000..896a1b226 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml @@ -0,0 +1,140 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: gluster-s3 +  labels: +    glusterfs: s3-template +    gluster-s3: template +  annotations: +    description: Gluster S3 service template +    tags: glusterfs,heketi,gluster-s3 +objects: +- kind: Service +  apiVersion: v1 +  metadata: +    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service +    labels: +      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service +      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service +  spec: +    ports: +    - protocol: TCP +      port: 8080 +      targetPort: 8080 +    selector: +      glusterfs: s3-pod +    type: ClusterIP +    sessionAffinity: None +  status: +    loadBalancer: {} +- kind: Route +  apiVersion: v1 +  metadata: +    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route +    labels: +      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route +      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route +  spec: +    to: +      kind: Service +      name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc +    labels: +      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc +      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc +    annotations: +      openshift.io/scc: privileged +      description: Defines how to deploy gluster s3 object storage +  spec: +    replicas: 1 +    selector: +      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod +    template: +      metadata: +        name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3 +        labels: +          glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod +          gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod +      spec: +   
     containers: +        - name: gluster-s3 +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          imagePullPolicy: IfNotPresent +          ports: +          - name: gluster +            containerPort: 8080 +            protocol: TCP +          env: +          - name: S3_ACCOUNT +            value: "${S3_ACCOUNT}" +          - name: S3_USER +            value: "${S3_USER}" +          - name: S3_PASSWORD +            value: "${S3_PASSWORD}" +          resources: {} +          volumeMounts: +          - name: gluster-vol1 +            mountPath: "/mnt/gluster-object/${S3_ACCOUNT}" +          - name: gluster-vol2 +            mountPath: "/mnt/gluster-object/gsmetadata" +          - name: glusterfs-cgroup +            readOnly: true +            mountPath: "/sys/fs/cgroup" +          terminationMessagePath: "/dev/termination-log" +          securityContext: +            privileged: true +        volumes: +        - name: glusterfs-cgroup +          hostPath: +            path: "/sys/fs/cgroup" +        - name: gluster-vol1 +          persistentVolumeClaim: +            claimName: ${PVC} +        - name: gluster-vol2 +          persistentVolumeClaim: +            claimName: ${META_PVC} +        restartPolicy: Always +        terminationGracePeriodSeconds: 30 +        dnsPolicy: ClusterFirst +        serviceAccountName: default +        serviceAccount: default +        securityContext: {} +parameters: +- name: IMAGE_NAME +  displayName: gluster-s3 container image name +  required: True +- name: IMAGE_VERSION +  displayName: gluster-s3 container image version +  required: True +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances +  value: storage +- name: S3_ACCOUNT +  displayName: S3 Account Name +  description: S3 storage account which will provide storage on GlusterFS volumes +  required: true +- 
name: S3_USER +  displayName: S3 User +  description: S3 user who can access the S3 storage account +  required: true +- name: S3_PASSWORD +  displayName: S3 User Password +  description: Password for the S3 user +  required: true +- name: PVC +  displayName: Primary GlusterFS-backed PVC +  description: GlusterFS-backed PVC for object storage +  value: gluster-s3-claim +- name: META_PVC +  displayName: Metadata GlusterFS-backed PVC +  description: GlusterFS-backed PVC for object storage metadata +  value: gluster-s3-meta-claim +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances +  value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml new file mode 100644 index 000000000..63dd5cce6 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml @@ -0,0 +1,104 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: glusterblock-provisioner +  labels: +    glusterfs: block-template +    glusterblock: template +  annotations: +    description: glusterblock provisioner template +    tags: glusterfs +objects: +- kind: ClusterRole +  apiVersion: v1 +  metadata: +    name: glusterblock-provisioner-runner +    labels: +      glusterfs: block-provisioner-runner-clusterrole +      glusterblock: provisioner-runner-clusterrole +  rules: +    - apiGroups: [""] +      resources: ["persistentvolumes"] +      verbs: ["get", "list", "watch", "create", "delete"] +    - apiGroups: [""] +      resources: ["persistentvolumeclaims"] +      verbs: ["get", "list", "watch", "update"] +    - apiGroups: ["storage.k8s.io"] +      resources: ["storageclasses"] +      verbs: ["get", "list", "watch"] +    - apiGroups: [""] +      resources: ["events"] +      verbs: ["list", "watch", "create", "update", 
"patch"] +    - apiGroups: [""] +      resources: ["services"] +      verbs: ["get"] +    - apiGroups: [""] +      resources: ["secrets"] +      verbs: ["get", "create", "delete"] +    - apiGroups: [""] +      resources: ["routes"] +      verbs: ["get", "list"] +- apiVersion: v1 +  kind: ServiceAccount +  metadata: +    name: glusterblock-${CLUSTER_NAME}-provisioner +    labels: +      glusterfs: block-${CLUSTER_NAME}-provisioner-sa +      glusterblock: ${CLUSTER_NAME}-provisioner-sa +- apiVersion: v1 +  kind: ClusterRoleBinding +  metadata: +    name: glusterblock-${CLUSTER_NAME}-provisioner +  roleRef: +    name: glusterblock-provisioner-runner +  subjects: +  - kind: ServiceAccount +    name: glusterblock-${CLUSTER_NAME}-provisioner +    namespace: ${NAMESPACE} +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: glusterblock-${CLUSTER_NAME}-provisioner-dc +    labels: +      glusterfs: block-${CLUSTER_NAME}-provisioner-dc +      glusterblock: ${CLUSTER_NAME}-provisioner-dc +    annotations: +      description: Defines how to deploy the glusterblock provisioner pod. 
+  spec: +    replicas: 1 +    selector: +      glusterfs: block-${CLUSTER_NAME}-provisioner-pod +    triggers: +    - type: ConfigChange +    strategy: +      type: Recreate +    template: +      metadata: +        name: glusterblock-provisioner +        labels: +          glusterfs: block-${CLUSTER_NAME}-provisioner-pod +      spec: +        serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner +        containers: +        - name: glusterblock-provisioner +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          imagePullPolicy: IfNotPresent +          env: +          - name: PROVISIONER_NAME +            value: gluster.org/glusterblock +parameters: +- name: IMAGE_NAME +  displayName: glusterblock provisioner container image name +  required: True +- name: IMAGE_VERSION +  displayName: glusterblock provisioner container image version +  required: True +- name: NAMESPACE +  displayName: glusterblock provisioner namespace +  description: The namespace in which these resources are being created +  required: True +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances +  value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml new file mode 100644 index 000000000..09850a2c2 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml @@ -0,0 +1,154 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: glusterfs +  labels: +    glusterfs: template +  annotations: +    description: GlusterFS DaemonSet template +    tags: glusterfs +objects: +- kind: DaemonSet +  apiVersion: extensions/v1beta1 +  metadata: +    name: glusterfs-${CLUSTER_NAME} +    labels: +      glusterfs: ${CLUSTER_NAME}-daemonset +    annotations: +      description: GlusterFS DaemonSet +      tags: glusterfs +  spec: + 
   selector: +      matchLabels: +        glusterfs: ${CLUSTER_NAME}-pod +    template: +      metadata: +        name: glusterfs-${CLUSTER_NAME} +        labels: +          glusterfs: ${CLUSTER_NAME}-pod +          glusterfs-node: pod +      spec: +        nodeSelector: "${{NODE_LABELS}}" +        hostNetwork: true +        containers: +        - name: glusterfs +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          imagePullPolicy: IfNotPresent +          env: +          - name: GB_GLFS_LRU_COUNT +            value: "${GB_GLFS_LRU_COUNT}" +          - name: TCMU_LOGDIR +            value: "${TCMU_LOGDIR}" +          resources: +            requests: +              memory: 100Mi +              cpu: 100m +          volumeMounts: +          - name: glusterfs-heketi +            mountPath: "/var/lib/heketi" +          - name: glusterfs-run +            mountPath: "/run" +          - name: glusterfs-lvm +            mountPath: "/run/lvm" +          - name: glusterfs-etc +            mountPath: "/etc/glusterfs" +          - name: glusterfs-logs +            mountPath: "/var/log/glusterfs" +          - name: glusterfs-config +            mountPath: "/var/lib/glusterd" +          - name: glusterfs-dev +            mountPath: "/dev" +          - name: glusterfs-misc +            mountPath: "/var/lib/misc/glusterfsd" +          - name: glusterfs-cgroup +            mountPath: "/sys/fs/cgroup" +            readOnly: true +          - name: glusterfs-ssl +            mountPath: "/etc/ssl" +            readOnly: true +          securityContext: +            capabilities: {} +            privileged: true +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 40 +            exec: +              command: +              - "/bin/bash" +              - "-c" +              - systemctl status glusterd.service +            periodSeconds: 25 +            successThreshold: 1 +            failureThreshold: 15 +          livenessProbe: +          
  timeoutSeconds: 3 +            initialDelaySeconds: 40 +            exec: +              command: +              - "/bin/bash" +              - "-c" +              - systemctl status glusterd.service +            periodSeconds: 25 +            successThreshold: 1 +            failureThreshold: 15 +          terminationMessagePath: "/dev/termination-log" +        volumes: +        - name: glusterfs-heketi +          hostPath: +            path: "/var/lib/heketi" +        - name: glusterfs-run +          emptyDir: {} +        - name: glusterfs-lvm +          hostPath: +            path: "/run/lvm" +        - name: glusterfs-etc +          hostPath: +            path: "/etc/glusterfs" +        - name: glusterfs-logs +          hostPath: +            path: "/var/log/glusterfs" +        - name: glusterfs-config +          hostPath: +            path: "/var/lib/glusterd" +        - name: glusterfs-dev +          hostPath: +            path: "/dev" +        - name: glusterfs-misc +          hostPath: +            path: "/var/lib/misc/glusterfsd" +        - name: glusterfs-cgroup +          hostPath: +            path: "/sys/fs/cgroup" +        - name: glusterfs-ssl +          hostPath: +            path: "/etc/ssl" +        restartPolicy: Always +        terminationGracePeriodSeconds: 30 +        dnsPolicy: ClusterFirst +        securityContext: {} +parameters: +- name: NODE_LABELS +  displayName: Daemonset Node Labels +  description: Labels which define the daemonset node selector. 
Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host' +  value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME +  displayName: GlusterFS container image name +  required: True +- name: IMAGE_VERSION +  displayName: GlusterFS container image version +  required: True +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances +  value: storage +- name: GB_GLFS_LRU_COUNT +  displayName: Maximum number of block hosting volumes +  description: This value is to set maximum number of block hosting volumes. +  value: "15" +  required: true +- name: TCMU_LOGDIR +  displayName: Tcmu runner log directory +  description: This value is to set tcmu runner log directory +  value: "/var/log/glusterfs/gluster-block" +  required: true diff --git a/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml new file mode 100644 index 000000000..28cdb2982 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: heketi +  labels: +    glusterfs: heketi-template +  annotations: +    description: Heketi service deployment template +    tags: glusterfs,heketi +objects: +- kind: Service +  apiVersion: v1 +  metadata: +    name: heketi-${CLUSTER_NAME} +    labels: +      glusterfs: heketi-${CLUSTER_NAME}-service +      heketi: ${CLUSTER_NAME}-service +    annotations: +      description: Exposes Heketi service +  spec: +    ports: +    - name: heketi +      port: 8080 +      targetPort: 8080 +    selector: +      glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route +  apiVersion: v1 +  metadata: +    name: ${HEKETI_ROUTE} +    labels: +      glusterfs: heketi-${CLUSTER_NAME}-route +      heketi: ${CLUSTER_NAME}-route +  spec: +    to: +      kind: Service + 
     name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: heketi-${CLUSTER_NAME} +    labels: +      glusterfs: heketi-${CLUSTER_NAME}-dc +      heketi: ${CLUSTER_NAME}-dc +    annotations: +      description: Defines how to deploy Heketi +  spec: +    replicas: 1 +    selector: +      glusterfs: heketi-${CLUSTER_NAME}-pod +    triggers: +    - type: ConfigChange +    strategy: +      type: Recreate +    template: +      metadata: +        name: heketi-${CLUSTER_NAME} +        labels: +          glusterfs: heketi-${CLUSTER_NAME}-pod +          heketi: ${CLUSTER_NAME}-pod +      spec: +        serviceAccountName: heketi-${CLUSTER_NAME}-service-account +        containers: +        - name: heketi +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          imagePullPolicy: IfNotPresent +          env: +          - name: HEKETI_USER_KEY +            value: ${HEKETI_USER_KEY} +          - name: HEKETI_ADMIN_KEY +            value: ${HEKETI_ADMIN_KEY} +          - name: HEKETI_EXECUTOR +            value: ${HEKETI_EXECUTOR} +          - name: HEKETI_FSTAB +            value: ${HEKETI_FSTAB} +          - name: HEKETI_SNAPSHOT_LIMIT +            value: '14' +          - name: HEKETI_KUBE_GLUSTER_DAEMONSET +            value: '1' +          ports: +          - containerPort: 8080 +          volumeMounts: +          - name: db +            mountPath: /var/lib/heketi +          - name: config +            mountPath: /etc/heketi +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 3 +            httpGet: +              path: /hello +              port: 8080 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 30 +            httpGet: +              path: /hello +              port: 8080 +        volumes: +        - name: db +          glusterfs: +            endpoints: heketi-db-${CLUSTER_NAME}-endpoints +            path: heketidbstorage +        - name: 
config +          secret: +            secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY +  displayName: Heketi User Secret +  description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY +  displayName: Heketi Administrator Secret +  description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR +  displayName: heketi executor type +  description: Set the executor type, kubernetes or ssh +  value: kubernetes +- name: HEKETI_FSTAB +  displayName: heketi fstab path +  description: Set the fstab path, file that is populated with bricks that heketi creates +  value: /var/lib/heketi/fstab +- name: HEKETI_ROUTE +  displayName: heketi route name +  description: Set the hostname for the route URL +  value: "heketi-glusterfs" +- name: IMAGE_NAME +  displayName: heketi container image name +  required: True +- name: IMAGE_VERSION +  displayName: heketi container image version +  required: True +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify this heketi service, useful for running multiple heketi instances +  value: glusterfs diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..11c9195bb --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: +  name: glusterfs-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} +  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} +  ports: +  - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 
b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..3f869d2b7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: +  name: glusterfs-{{ glusterfs_name }}-endpoints +spec: +  ports: +  - port: 1 +status: +  loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 new file mode 100644 index 000000000..095fb780f --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: +  name: glusterfs-{{ glusterfs_name }} +provisioner: kubernetes.io/glusterfs +parameters: +  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" +  restuser: "admin" +{% if glusterfs_heketi_admin_key is defined %} +  secretNamespace: "{{ glusterfs_namespace }}" +  secretName: "heketi-{{ glusterfs_name }}-admin-secret" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 new file mode 100644 index 000000000..99cbdf748 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: +  name: heketi-db-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} +  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} +  ports: +  - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 
b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 new file mode 100644 index 000000000..dcb896441 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: +  name: heketi-db-{{ glusterfs_name }}-endpoints +spec: +  ports: +  - port: 1 +status: +  loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 new file mode 100644 index 000000000..565e9be98 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 @@ -0,0 +1,42 @@ +{ +	"_port_comment": "Heketi Server Port Number", +	"port" : "8080", + +	"_use_auth": "Enable JWT authorization. Please enable for deployment", +	"use_auth" : false, + +	"_jwt" : "Private keys for access", +	"jwt" : { +		"_admin" : "Admin has access to all APIs", +		"admin" : { +			"key" : "My Secret" +		}, +		"_user" : "User only has access to /volumes endpoint", +		"user" : { +			"key" : "My Secret" +		} +	}, + +	"_glusterfs_comment": "GlusterFS Configuration", +	"glusterfs" : { + +		"_executor_comment": "Execute plugin. 
Possible choices: mock, kubernetes, ssh", +		"executor" : "{{ glusterfs_heketi_executor }}", + +		"_db_comment": "Database file name", +		"db" : "/var/lib/heketi/heketi.db", + +		"sshexec" : { +			"keyfile" : "/etc/heketi/private_key", +			"port" : "{{ glusterfs_heketi_ssh_port }}", +			"user" : "{{ glusterfs_heketi_ssh_user }}", +			"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }} +		}, + +		"_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or existing volume exhausted", +		"auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }}, + +		"_block_hosting_volume_size": "New block hosting volume will be created in size mentioned. This is considered only if auto-create is enabled.", +		"block_hosting_volume_size": {{ glusterfs_block_host_vol_size }} +	} +} diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 new file mode 100644 index 000000000..d6c28f6dd --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 @@ -0,0 +1,49 @@ +{ +  "clusters": [ +{%- set clusters = {} -%} +{%- for node in glusterfs_nodes -%} +  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%} +  {%- if cluster in clusters -%} +    {%- set _dummy = clusters[cluster].append(node) -%} +  {%- else -%} +    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%} +  {%- endif -%} +{%- endfor -%} +{%- for cluster in clusters -%} +    { +      "nodes": [ +{%- for node in clusters[cluster] -%} +        { +          "node": { +            "hostnames": { +              "manage": [ +{%- if 'glusterfs_hostname' in hostvars[node] -%} +                "{{ hostvars[node].glusterfs_hostname }}" +{%- elif 'openshift' in hostvars[node] -%} +                "{{ hostvars[node].openshift.node.nodename }}" +{%- else -%} +                "{{ node }}" +{%- endif -%} +              ], +   
           "storage": [ +{%- if 'glusterfs_ip' in hostvars[node] -%} +                "{{ hostvars[node].glusterfs_ip }}" +{%- else -%} +                "{{ hostvars[node].openshift.common.ip }}" +{%- endif -%} +              ] +            }, +            "zone": {{ hostvars[node].glusterfs_zone | default(1) }} +          }, +          "devices": [ +{%- for device in hostvars[node].glusterfs_devices -%} +            "{{ device }}"{% if not loop.last %},{% endif %} +{%- endfor -%} +          ] +        }{% if not loop.last %},{% endif %} +{%- endfor -%} +      ] +    }{% if not loop.last %},{% endif %} +{%- endfor -%} +  ] +} diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index c4e023c1e..24264fa43 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -1,7 +1,6 @@  ---  - name: setup firewall -  include: firewall.yml -  static: yes +  import_tasks: firewall.yml  - name: Install nfs-utils    package: name=nfs-utils state=present diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index 1c8b9046c..4f9158ade 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -93,11 +93,11 @@    - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']    block:    - name: Set openshift_version for rpm installation -    include: set_version_rpm.yml +    include_tasks: set_version_rpm.yml      when: not is_containerized | bool    - name: Set openshift_version for containerized installation -    include: set_version_containerized.yml +    include_tasks: set_version_containerized.yml      when: is_containerized | bool    - block: diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml index b727eb74d..574e89899 100644 --- 
a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/set_version_containerized.yml @@ -20,7 +20,7 @@  - name: Lookup latest containerized version if no version specified    command: > -    docker run --rm {{ openshift.common.cli_image }}:latest version +    docker run --rm {{ openshift_cli_image }}:latest version    register: cli_image_version    when:    - openshift_version is not defined @@ -43,7 +43,7 @@  # and use that value instead.  - name: Set precise containerized version to configure if openshift_release specified    command: > -    docker run --rm {{ openshift.common.cli_image }}:v{{ openshift_version }} version +    docker run --rm {{ openshift_cli_image }}:v{{ openshift_version }} version    register: cli_image_version    when:    - openshift_version is defined diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml index c477d386c..99084cd3f 100644 --- a/roles/os_firewall/tasks/main.yml +++ b/roles/os_firewall/tasks/main.yml @@ -8,12 +8,12 @@    set_fact:      r_os_firewall_is_atomic: "{{ r_os_firewall_ostree_booted.stat.exists }}" -- include: firewalld.yml +- include_tasks: firewalld.yml    when:    - os_firewall_enabled | bool    - os_firewall_use_firewalld | bool -- include: iptables.yml +- include_tasks: iptables.yml    when:    - os_firewall_enabled | bool    - not os_firewall_use_firewalld | bool diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml index 6a4d89a46..71c8ca470 100644 --- a/roles/template_service_broker/tasks/main.yml +++ b/roles/template_service_broker/tasks/main.yml @@ -1,8 +1,8 @@  ---  # do any asserts here -- include: install.yml +- include_tasks: install.yml    when: template_service_broker_install | bool -- include: remove.yml +- include_tasks: remove.yml    when: template_service_broker_remove | bool  | 
