127 files changed, 2736 insertions, 2356 deletions
| diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index cdfd93725..2a4f80a36 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,16 +1,3 @@ -### <HTPASSWD_AUTH> - -We are aware of the current issues related to htpasswd_auth failures -Please downgrade to ansible 2.2.0.0 until a fix is released. -You can track the status of the bug fix in this issue: -https://github.com/openshift/openshift-ansible/issues/3111 -Please erase this <HTPASSWD_AUTH> section if it does not apply to you. - -Thanks - 2017-01-31 - -### </HTPASSWD_AUTH> - -  #### Description  Provide a brief description of your issue here. For example: diff --git a/.redhat-ci.inventory b/.redhat-ci.inventory new file mode 100644 index 000000000..3c8296055 --- /dev/null +++ b/.redhat-ci.inventory @@ -0,0 +1,22 @@ +[OSEv3:children] +masters +nodes +etcd + +[OSEv3:vars] +ansible_ssh_user=root +ansible_python_interpreter=/usr/bin/python3 +deployment_type=origin +openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}" +openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io" + +[masters] +ocp-master + +[etcd] +ocp-master + +[nodes] +ocp-master openshift_schedulable=false +ocp-node1  openshift_node_labels="{'region':'infra'}" +ocp-node2  openshift_node_labels="{'region':'infra'}" diff --git a/.redhat-ci.yml b/.redhat-ci.yml new file mode 100644 index 000000000..d9849ed60 --- /dev/null +++ b/.redhat-ci.yml @@ -0,0 +1,45 @@ +--- + +cluster: +  hosts: +    - name: ocp-master +      distro: fedora/25/atomic +    - name: ocp-node1 +      distro: fedora/25/atomic +    - name: ocp-node2 +      distro: fedora/25/atomic +  container: +    image: fedora:25 + +packages: +  - gcc +  - python-pip +  - python-devel +  - openssl-devel +  - redhat-rpm-config + +context: 'fedora/25/atomic | origin/v1.5.0-rc.0' + +env: +  OPENSHIFT_IMAGE_TAG: v1.5.0-rc.0 + +tests: +  - pip install ansible==2.2.2.0  # F25 currently has 2.2.1, so install from pypi +  - ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status' +  - ansible-playbook -vvv -i .redhat-ci.inventory playbooks/byo/config.yml +  # run a small subset of origin conformance tests to sanity check the cluster +  # NB: we run it on the master since we may be in a different OSP network +  - ssh ocp-master docker run --rm --net=host --privileged +    -v /etc/origin/master/admin.kubeconfig:/config fedora:25 sh -c +    '"dnf install -y origin-tests && +      KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1 +      --ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""' + +--- + +inherit: true + +context: 'fedora/25/atomic | origin/v3.6.0-alpha.0' + +env: +  OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.0 diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 030cb2838..ca314ce0c 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.6.23-1 ./ +3.6.37-1 ./ diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index b550bd16a..10c8600ba 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -773,6 +773,23 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):                                      fsType=filesystem,                                      volumeID=volume_id)))                          persistent_volumes.append(persistent_volume) +                    elif kind == 'glusterfs': +                        volume = params['volume']['name'] +                        size = 
params['volume']['size'] +                        access_modes = params['access']['modes'] +                        endpoints = params['glusterfs']['endpoints'] +                        path = params['glusterfs']['path'] +                        read_only = params['glusterfs']['readOnly'] +                        persistent_volume = dict( +                            name="{0}-volume".format(volume), +                            capacity=size, +                            access_modes=access_modes, +                            storage=dict( +                                glusterfs=dict( +                                    endpoints=endpoints, +                                    path=path, +                                    readOnly=read_only))) +                        persistent_volumes.append(persistent_volume)                      elif not (kind == 'object' or kind == 'dynamic'):                          msg = "|failed invalid storage kind '{0}' for component '{1}'".format(                              kind, diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index a99423411..f70971537 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -426,6 +426,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57  #openshift_hosted_registry_storage_volume_size=10Gi  # +# Native GlusterFS Registry Storage +#openshift_hosted_registry_storage_kind=glusterfs +#  # AWS S3  # S3 bucket must already exist.  #openshift_hosted_registry_storage_kind=object diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 9774aa66b..f5e0de1b0 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -426,6 +426,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57  #openshift_hosted_registry_storage_volume_size=10Gi  # +# Native GlusterFS Registry Storage +#openshift_hosted_registry_storage_kind=glusterfs +#  # AWS S3  #  # S3 bucket must already exist. 
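For reference, a minimal standalone sketch of the `glusterfs` branch added to `oo_persistent_volumes` in `filter_plugins/oo_filters.py` above. The dict shape mirrors the diff exactly; the sample `params` values are illustrative assumptions, with the endpoints/path defaults taken from the `openshift_facts.py` change later in this same diff.

```python
# Sketch (not part of the patch) of the new 'glusterfs' persistent-volume branch.
# Sample inputs are assumptions; endpoints/path match the registry defaults
# added in roles/openshift_facts/library/openshift_facts.py in this diff.
def glusterfs_persistent_volume(params):
    volume = params['volume']['name']
    size = params['volume']['size']
    access_modes = params['access']['modes']
    endpoints = params['glusterfs']['endpoints']
    path = params['glusterfs']['path']
    read_only = params['glusterfs']['readOnly']
    return dict(
        name="{0}-volume".format(volume),   # e.g. "registry-volume"
        capacity=size,                      # e.g. "10Gi"
        access_modes=access_modes,          # e.g. ['ReadWriteMany']
        storage=dict(
            glusterfs=dict(
                endpoints=endpoints,
                path=path,
                readOnly=read_only)))

sample_params = {
    'volume': {'name': 'registry', 'size': '10Gi'},          # assumed values
    'access': {'modes': ['ReadWriteMany']},
    'glusterfs': {'endpoints': 'glusterfs-registry-endpoints',
                  'path': 'glusterfs-registry-volume',
                  'readOnly': False},
}
print(glusterfs_persistent_volume(sample_params))
# -> {'name': 'registry-volume', 'capacity': '10Gi',
#     'access_modes': ['ReadWriteMany'],
#     'storage': {'glusterfs': {'endpoints': 'glusterfs-registry-endpoints',
#                               'path': 'glusterfs-registry-volume',
#                               'readOnly': False}}}
```

This is the same dict that `openshift_hosted_registry_storage_kind=glusterfs` (enabled in the example inventories above) causes to be appended to `persistent_volumes`.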
diff --git a/openshift-ansible.spec b/openshift-ansible.spec index e5cef6ec1..63f0a5a2d 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -9,7 +9,7 @@  %global __requires_exclude ^/usr/bin/ansible-playbook$  Name:           openshift-ansible -Version:        3.6.23 +Version:        3.6.37  Release:        1%{?dist}  Summary:        Openshift and Atomic Enterprise Ansible  License:        ASL 2.0 @@ -76,6 +76,9 @@ find -L %{buildroot}%{_datadir}/ansible/%{name}/playbooks -name filter_plugins -  cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/  # remove contiv role  rm -rf %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/* +# touch a file in contiv so that it can be added to SCM's +touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir +  # openshift_master_facts symlinks filter_plugins/oo_filters.py from ansible_plugins/filter_plugins  pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins  ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py @@ -270,6 +273,85 @@ Atomic OpenShift Utilities includes  %changelog +* Tue Apr 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.37-1 +- Differentiate between service serving router certificate and custom +  openshift_hosted_router_certificate when replacing the router certificate. +  (abutcher@redhat.com) + +* Tue Apr 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.36-1 +- Update swap disable tasks (rteague@redhat.com) +- Removing resource version to remove object conflicts caused by race +  conditions. (kwoodson@redhat.com) +- cast openshift_logging_use_mux_client to bool (rmeggins@redhat.com) +- mux does not require privileged, only hostmount-anyuid (rmeggins@redhat.com) +- Switched Heapster to use certificates generated by OpenShift +  (juraci@kroehling.de) +- Use metrics and logging deployer tag v3.4 for enterprise (sdodson@redhat.com) +- Remove v1.5 and v1.6 metrics/logging templates (sdodson@redhat.com) + +* Sun Apr 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.35-1 +-  + +* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.34-1 +- GlusterFS: provide default for groups.oo_glusterfs_to_config in with_items +  (jarrpa@redhat.com) + +* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.33-1 +- Adding module calls instead of command for idempotency. 
(kwoodson@redhat.com) +- Use return_value when value is constant (pierre- +  louis.bonicoli@libregerbil.fr) +- Add missing mock for locate_oc_binary method (pierre- +  louis.bonicoli@libregerbil.fr) + +* Fri Apr 21 2017 Scott Dodson <sdodson@redhat.com> 3.6.32-1 +- Don't check excluder versions when they're not enabled (sdodson@redhat.com) + +* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.31-1 +- Stop all services prior to upgrading, start all services after +  (sdodson@redhat.com) + +* Thu Apr 20 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.30-1 +- Add Ansible syntax checks to tox (rteague@redhat.com) +- Add /etc/sysconfig/etcd to etcd_container (me@fale.io) +- openshift_version: improve messaging (lmeyer@redhat.com) +- Simplify memory availability check, review tests (rhcarvalho@gmail.com) +- Simplify mixin class (rhcarvalho@gmail.com) +- Simplify disk availability check, review tests (rhcarvalho@gmail.com) +- add disk and memory availability check tests (jvallejo@redhat.com) +- add ram and storage preflight check (jvallejo@redhat.com) +- Fix paths for file includes (rteague@redhat.com) +- Fix instantiation of action plugin in test fixture (rhcarvalho@gmail.com) +- Introduce Elasticsearch readiness probe (lukas.vlcek@gmail.com) +- added a empty file to the contiv empty dir. This allows contiv to be vendored +  in git (mwoodson@redhat.com) + +* Wed Apr 19 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.29-1 +- Create openshift-metrics entrypoint playbook (rteague@redhat.com) + +* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.28-1 +- Minor v3.6 upgrade docs fixes (rteague@redhat.com) + +* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.27-1 +- repo: start testing PRs on Fedora Atomic Host (jlebon@redhat.com) + +* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.26-1 +- Correct role dependencies (rteague@redhat.com) +- Allow for GlusterFS to provide registry storage (jarrpa@redhat.com) +- Integrate GlusterFS into OpenShift installation (jarrpa@redhat.com) +- GlusterFS playbook and role (jarrpa@redhat.com) + +* Mon Apr 17 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.25-1 +- Fix default image tag for enterprise (sdodson@redhat.com) +- Cast etcd_debug to a boolean (skuznets@redhat.com) + +* Fri Apr 14 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.24-1 +- tox tests: pin test requirement versions (lmeyer@redhat.com) +- This is no longer a widely encountered issue (sdodson@redhat.com) +- Standardize use of byo and common for network_manager.yml +  (rteague@redhat.com) +- Disable swap space on nodes at install and upgrade (rteague@redhat.com) +- Do not check package version on non-master/node (rhcarvalho@gmail.com) +  * Thu Apr 13 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.23-1  - Refactor initialize groups tasks (rteague@redhat.com)  - tox tests: pin test requirement versions (lmeyer@redhat.com) diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index cb464cf0d..268a65415 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -13,6 +13,8 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"  g_nfs_hosts: "{{ groups.nfs | default([]) }}" +g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}" +  g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)                   | union(g_lb_hosts) | union(g_nfs_hosts)                   | union(g_new_node_hosts)| 
union(g_new_master_hosts) diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml new file mode 100644 index 000000000..5ad3a1a01 --- /dev/null +++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml @@ -0,0 +1,4 @@ +--- +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/openshift_metrics.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md index 0425ba518..0f64f40f3 100644 --- a/playbooks/byo/openshift-cluster/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -4,5 +4,6 @@ cluster. Additional notes for the associated upgrade playbooks are  provided in their respective directories.  # Upgrades available -- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift origin from 1.4.x to 1.5.x) -- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift origin from 1.3.x to 1.4.x) +- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x) +- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x) +- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x) diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md index 930cc753c..797af671a 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md @@ -1,11 +1,10 @@ -# v3.5 Major and Minor Upgrade Playbook +# v3.6 Major and Minor Upgrade Playbook  ## Overview -This playbook currently performs the -following steps. +This playbook currently performs the following steps.   * Upgrade and restart master services - * Unschedule node. + * Unschedule node   * Upgrade and restart docker   * Upgrade and restart node services   * Modifies the subset of the configuration necessary @@ -15,4 +14,7 @@ following steps.   
* Updates image streams and quickstarts  ## Usage + +```  ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml +``` diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml index 9bb3ea17f..b23692237 100644 --- a/playbooks/byo/openshift-node/network_manager.yml +++ b/playbooks/byo/openshift-node/network_manager.yml @@ -1,42 +1,4 @@  --- -- name: Create initial host groups for localhost -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  tags: -  - always -  tasks: -  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml -  - name: Evaluate group l_oo_all_hosts -    add_host: -      name: "{{ item }}" -      groups: l_oo_all_hosts -    with_items: "{{ g_all_hosts | default([]) }}" -    changed_when: False +- include: ../openshift-cluster/initialize_groups.yml -- name: Install and configure NetworkManager -  hosts: l_oo_all_hosts -  become: yes -  tasks: -  - name: install NetworkManager -    package: -      name: 'NetworkManager' -      state: present - -  - name: configure NetworkManager -    lineinfile: -      dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" -      regexp: '^{{ item }}=' -      line: '{{ item }}=yes' -      state: present -      create: yes -    with_items: -    - 'USE_PEERDNS' -    - 'NM_CONTROLLED' - -  - name: enable and start NetworkManager -    service: -      name: 'NetworkManager' -      state: started -      enabled: yes +- include: ../../common/openshift-node/network_manager.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 3c70db6a9..239bb211b 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -31,6 +31,10 @@    tags:    - node +- include: ../openshift-glusterfs/config.yml +  tags: +  - glusterfs +  - include: openshift_hosted.yml    tags:    - hosted diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 45a4875a3..6aac70f63 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -29,6 +29,10 @@        msg: The nfs group must be limited to one host      when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}" +  - fail: +      msg: This playbook requires g_glusterfs_hosts to be set +    when: "{{ g_glusterfs_hosts is not defined }}" +    - name: Evaluate oo_all_hosts      add_host:        name: "{{ item }}" @@ -119,3 +123,12 @@        ansible_become: "{{ g_sudo | default(omit) }}"      with_items: "{{ g_nfs_hosts | default([]) }}"      changed_when: no + +  - name: Evaluate oo_glusterfs_to_config +    add_host: +      name: "{{ item }}" +      groups: oo_glusterfs_to_config +      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" +      ansible_become: "{{ g_sudo | default(omit) }}" +    with_items: "{{ g_glusterfs_hosts | default([]) }}" +    changed_when: no diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index 9f38ceea6..bcff4a1a1 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -1,4 +1,6 @@  --- +- include: evaluate_groups.yml +  - name: OpenShift Metrics    hosts: oo_first_master    roles: diff --git 
a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml index 3b26abcc7..4fa7f9cdf 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml @@ -130,7 +130,7 @@        state: absent      changed_when: false -- include: ../openshift-etcd/restart.yml +- include: ../../openshift-etcd/restart.yml  # Update master config when ca-bundle not referenced. Services will be  # restarted below after new CA certificate has been distributed. @@ -322,7 +322,7 @@        group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout  }}"      with_items: "{{ client_users }}" -- include: ../openshift-master/restart.yml +- include: ../../openshift-master/restart.yml  - name: Distribute OpenShift CA certificate to nodes    hosts: oo_nodes_to_config @@ -371,4 +371,4 @@        state: absent      changed_when: false -- include: ../openshift-node/restart.yml +- include: ../../openshift-node/restart.yml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml index a7b614341..9f14f2d69 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml @@ -67,7 +67,66 @@          service.alpha.openshift.io/serving-cert-secret-name=router-certs          --config={{ mktemp.stdout }}/admin.kubeconfig          -n default -    when: l_router_dc.rc == 0 and 'router-certs' in router_secrets +    when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined + +  - block: +    - assert: +        that: +        - "'certfile' in openshift_hosted_router_certificate" +        - "'keyfile' in openshift_hosted_router_certificate" +        - "'cafile' in openshift_hosted_router_certificate" +        msg: |- +          openshift_hosted_router_certificate has been set in the inventory but is +          missing one or more required keys. Ensure that 'certfile', 'keyfile', +          and 'cafile' keys have been specified for the openshift_hosted_router_certificate +          inventory variable. + +    - name: Read router certificate and key +      become: no +      local_action: +        module: slurp +        src: "{{ item }}" +      register: openshift_router_certificate_output +      # Defaulting dictionary keys to none to avoid deprecation warnings +      # (future fatal errors) during template evaluation. Dictionary keys +      # won't be accessed unless openshift_hosted_router_certificate is +      # defined and has all keys (certfile, keyfile, cafile) which we +      # check above. 
+      with_items: +      - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}" +      - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}" +      - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}" + +    - name: Write temporary router certificate file +      copy: +        content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}" +        dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" +        mode: 0600 + +    - name: Write temporary router key file +      copy: +        content: "{{ (openshift_router_certificate_output.results +                         | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}" +        dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" +        mode: 0600 + +    - name: Replace router-certs secret +      shell: > +        {{ openshift.common.client_binary }} secrets new router-certs +        tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" +        tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" +        --type=kubernetes.io/tls +        --confirm +        -o json | {{ openshift.common.client_binary }} replace -f - + +    - name: Remove temporary router certificate and key files +      file: +        path: "{{ item }}" +        state: absent +      with_items: +      - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" +      - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" +    when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined    - name: Redeploy router      command: > diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml index d1e431c5e..a30952929 100644 --- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml +++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml @@ -5,12 +5,13 @@    tasks:    - include: pre/validate_excluder.yml      vars: -      #repoquery_cmd: repoquery_cmd -      #openshift_upgrade_target: openshift_upgrade_target -      excluder: "{{ item }}" -    with_items: -    - "{{ openshift.common.service_type }}-docker-excluder" -    - "{{ openshift.common.service_type }}-excluder" +      excluder: "{{ openshift.common.service_type }}-docker-excluder" +    when: enable_docker_excluder | default(enable_excluders) | default(True) | bool +  - include: pre/validate_excluder.yml +    vars: +      excluder: "{{ openshift.common.service_type }}-excluder" +    when: enable_openshift_excluder | default(enable_excluders) | default(True) | bool +    # disable excluders based on their status    - include_role: diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml new file mode 100644 index 000000000..75faf5ba8 --- /dev/null +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -0,0 +1,21 @@ +--- +- name: Open firewall ports for GlusterFS +  hosts: oo_glusterfs_to_config +  vars: +    os_firewall_allow: +    - service: glusterfs_sshd +      port: "2222/tcp" +    - service: glusterfs_daemon +      port: "24007/tcp" +    - service: glusterfs_management +      port: "24008/tcp" +    - service: glusterfs_bricks +      port: "49152-49251/tcp" +  roles: +  - os_firewall + +- 
name: Configure GlusterFS +  hosts: oo_first_master +  roles: +  - role: openshift_storage_glusterfs +    when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-glusterfs/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-glusterfs/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-glusterfs/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml new file mode 100644 index 000000000..be050c12c --- /dev/null +++ b/playbooks/common/openshift-node/network_manager.yml @@ -0,0 +1,26 @@ +--- +- name: Install and configure NetworkManager +  hosts: l_oo_all_hosts +  become: yes +  tasks: +  - name: install NetworkManager +    package: +      name: 'NetworkManager' +      state: present + +  - name: configure NetworkManager +    lineinfile: +      dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" +      regexp: '^{{ item }}=' +      line: '{{ item }}=yes' +      state: present +      create: yes +    with_items: +    - 'USE_PEERDNS' +    - 'NM_CONTROLLED' + +  - name: enable and start NetworkManager +    service: +      name: 'NetworkManager' +      state: started +      enabled: yes diff --git a/requirements.txt b/requirements.txt index cadbbe0f5..d00de5ed4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,5 @@ +# Versions are pinned to prevent pypi releases arbitrarily breaking +# tests with new APIs/semantics. We want to update versions deliberately.  ansible==2.2.2.0  click==6.7  pyOpenSSL==16.2.0 diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 9151dd0bd..1b5598f46 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -62,7 +62,7 @@ ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}  {% endif -%}  #[logging] -ETCD_DEBUG="{{ etcd_debug | default(false) | string }}" +ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}"  {% if etcd_log_package_levels is defined %}  ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}"  {% endif %} diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 4d083c4d5..8a311cd0f 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -900,6 +900,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 48e80a7cd..0930faadb 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -886,6 +886,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 35168d1a3..6a7be65d0 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ 
b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -872,6 +872,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 5f7e4b8fa..44923ecd2 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -872,6 +872,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') @@ -1960,7 +1967,7 @@ class PolicyUser(OpenShiftCLI):      @property      def policybindings(self):          if self._policy_bindings is None: -            results = self._get('clusterpolicybindings', None) +            results = self._get('policybindings', None)              if results['returncode'] != 0:                  raise OpenShiftCLIError('Could not retrieve policybindings')              self._policy_bindings = results['results'][0]['items'][0] diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index a6718d921..0604f48bb 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -990,6 +990,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 0e4b336fb..bdcf94a58 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1015,6 +1015,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index a34ce351e..af48ce636 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -864,6 +864,13 @@ class OpenShiftCLI(object):      def 
_replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') @@ -1531,10 +1538,10 @@ class Rule(object):          results = []          for rule in inc_rules: -            results.append(Rule(rule['apiGroups'], -                                rule['attributeRestrictions'], -                                rule['resources'], -                                rule['verbs'])) +            results.append(Rule(rule.get('apiGroups', ['']), +                                rule.get('attributeRestrictions', None), +                                rule.get('resources', []), +                                rule.get('verbs', [])))          return results @@ -1633,7 +1640,7 @@ class OCClusterRole(OpenShiftCLI):      @property      def clusterrole(self):          ''' property for clusterrole''' -        if not self._clusterrole: +        if self._clusterrole is None:              self.get()          return self._clusterrole @@ -1669,6 +1676,7 @@ class OCClusterRole(OpenShiftCLI):          elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:              result['returncode'] = 0 +            self.clusterrole = None          return result @@ -1738,6 +1746,9 @@ class OCClusterRole(OpenShiftCLI):                  # Create it here                  api_rval = oc_clusterrole.create() +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} +                  # return the created object                  api_rval = oc_clusterrole.get() diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 69dd23a0e..385ed888b 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -870,6 +870,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index 70329ccfe..649de547e 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -914,6 +914,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index bda5eebc5..74bf63353 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ 
-881,6 +881,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index 462e14868..2dd3d28ec 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -854,6 +854,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index 8aed060bb..bb7f97689 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -873,6 +873,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 0d18a7afe..ec9abcda7 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -890,6 +890,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 0b01670c6..706972de2 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -893,6 +893,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index 9b321b47c..bc5245216 100644 --- 
a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -825,6 +825,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 34f80ce13..de5426c51 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -882,6 +882,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index 331f31e41..02cd810ce 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -879,6 +879,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 3e4601cc3..a9103ebf6 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -874,6 +874,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 755ab3b02..f005adffc 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -924,6 +924,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_scale.py 
b/roles/lib_openshift/library/oc_scale.py index 0c83338b0..9dcb38216 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -868,6 +868,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 26e52a926..2ac0abcec 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -914,6 +914,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 440cda1b3..0af695e08 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -920,6 +920,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 5eb36ee32..ba8a1fdac 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -866,6 +866,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 1bc788e87..5bff7621c 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -866,6 +866,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +      
    cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index 3009e661a..450a30f57 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -926,6 +926,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 88f295a74..0937df5a1 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -838,6 +838,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 5f936fb49..d0e7e77e1 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -903,6 +903,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py index 88fcc1ddc..37a685ebb 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_user.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py @@ -46,7 +46,7 @@ class PolicyUser(OpenShiftCLI):      @property      def policybindings(self):          if self._policy_bindings is None: -            results = self._get('clusterpolicybindings', None) +            results = self._get('policybindings', None)              if results['returncode'] != 0:                  raise OpenShiftCLIError('Could not retrieve policybindings')              self._policy_bindings = results['results'][0]['items'][0] diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py index 1d3d977db..ae6795446 100644 --- a/roles/lib_openshift/src/class/oc_clusterrole.py +++ b/roles/lib_openshift/src/class/oc_clusterrole.py @@ -22,7 +22,7 @@ class OCClusterRole(OpenShiftCLI):      @property      def clusterrole(self):          ''' property for clusterrole''' -        if not self._clusterrole: +        if self._clusterrole is None:              self.get()          return self._clusterrole @@ -58,6 +58,7 @@ class OCClusterRole(OpenShiftCLI):          
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:              result['returncode'] = 0 +            self.clusterrole = None          return result @@ -127,6 +128,9 @@ class OCClusterRole(OpenShiftCLI):                  # Create it here                  api_rval = oc_clusterrole.create() +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} +                  # return the created object                  api_rval = oc_clusterrole.get() diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 1868b1420..fc1b6f1ec 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -76,6 +76,13 @@ class OpenShiftCLI(object):      def _replace(self, fname, force=False):          '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() +          cmd = ['replace', '-f', fname]          if force:              cmd.append('--force') diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py index 4590dcf90..fe5ed9723 100644 --- a/roles/lib_openshift/src/lib/rule.py +++ b/roles/lib_openshift/src/lib/rule.py @@ -136,9 +136,9 @@ class Rule(object):          results = []          for rule in inc_rules: -            results.append(Rule(rule['apiGroups'], -                                rule['attributeRestrictions'], -                                rule['resources'], -                                rule['verbs'])) +            results.append(Rule(rule.get('apiGroups', ['']), +                                rule.get('attributeRestrictions', None), +                                rule.get('resources', []), +                                rule.get('verbs', [])))          return results diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py index bab36fddc..30e13ce4b 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py +++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py @@ -205,10 +205,11 @@ class RegistryTest(unittest.TestCase):              }          ]}''' +    @mock.patch('oc_adm_registry.locate_oc_binary')      @mock.patch('oc_adm_registry.Utils._write')      @mock.patch('oc_adm_registry.Utils.create_tmpfile_copy')      @mock.patch('oc_adm_registry.Registry._run') -    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write): +    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):          ''' Testing state present '''          params = {'state': 'present',                    'debug': False, @@ -240,10 +241,9 @@ class RegistryTest(unittest.TestCase):              (0, '', ''),          ] -        mock_tmpfile_copy.side_effect = [ -            '/tmp/mocked_kubeconfig', -            '/tmp/mocked_kubeconfig', -        ] +        mock_tmpfile_copy.return_value = '/tmp/mocked_kubeconfig' + +        mock_oc_binary.return_value = 'oc'          results = Registry.run_ansible(params, False) diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py index 51393dbaf..5481ac623 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py +++ 
b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py @@ -286,10 +286,11 @@ class RouterTest(unittest.TestCase):      ]  }''' +    @mock.patch('oc_adm_router.locate_oc_binary')      @mock.patch('oc_adm_router.Utils._write')      @mock.patch('oc_adm_router.Utils.create_tmpfile_copy')      @mock.patch('oc_adm_router.Router._run') -    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write): +    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):          ''' Testing a create '''          params = {'state': 'present',                    'debug': False, @@ -345,6 +346,10 @@ class RouterTest(unittest.TestCase):              '/tmp/mocked_kubeconfig',          ] +        mock_oc_binary.side_effect = [ +            'oc', +        ] +          results = Router.run_ansible(params, False)          self.assertTrue(results['changed']) diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py index da326742f..b19a5a880 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py +++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py @@ -25,9 +25,10 @@ class OCObjectValidatorTest(unittest.TestCase):      maxDiff = None +    @mock.patch('oc_objectvalidator.locate_oc_binary')      @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')      @mock.patch('oc_objectvalidator.OCObjectValidator._run') -    def test_no_data(self, mock_cmd, mock_tmpfile_copy): +    def test_no_data(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):          ''' Testing when both all objects are empty '''          # Arrange @@ -62,6 +63,10 @@ class OCObjectValidatorTest(unittest.TestCase):              '/tmp/mocked_kubeconfig',          ] +        mock_oc_binary.side_effect = [ +            'oc', +        ] +          # Act          results = OCObjectValidator.run_ansible(params) @@ -76,9 +81,10 @@ class OCObjectValidatorTest(unittest.TestCase):              mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),          ]) +    @mock.patch('oc_objectvalidator.locate_oc_binary')      @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')      @mock.patch('oc_objectvalidator.OCObjectValidator._run') -    def test_error_code(self, mock_cmd, mock_tmpfile_copy): +    def test_error_code(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):          ''' Testing when we fail to get objects '''          # Arrange @@ -98,6 +104,10 @@ class OCObjectValidatorTest(unittest.TestCase):              '/tmp/mocked_kubeconfig',          ] +        mock_oc_binary.side_effect = [ +            'oc' +        ] +          error_results = {              'returncode': 1,              'stderr': 'Error.', @@ -120,9 +130,10 @@ class OCObjectValidatorTest(unittest.TestCase):              mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),          ]) +    @mock.patch('oc_objectvalidator.locate_oc_binary')      @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')      @mock.patch('oc_objectvalidator.OCObjectValidator._run') -    def test_valid_both(self, mock_cmd, mock_tmpfile_copy): +    def test_valid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):          ''' Testing when both all objects are valid '''          # Arrange @@ -427,6 +438,10 @@ class OCObjectValidatorTest(unittest.TestCase):              '/tmp/mocked_kubeconfig',          ] +        mock_oc_binary.side_effect = [ +            'oc' +        ] +          # Act          
results = OCObjectValidator.run_ansible(params) @@ -441,9 +456,10 @@ class OCObjectValidatorTest(unittest.TestCase):              mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),          ]) +    @mock.patch('oc_objectvalidator.locate_oc_binary')      @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')      @mock.patch('oc_objectvalidator.OCObjectValidator._run') -    def test_invalid_both(self, mock_cmd, mock_tmpfile_copy): +    def test_invalid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):          ''' Testing when all objects are invalid '''          # Arrange @@ -886,6 +902,10 @@ class OCObjectValidatorTest(unittest.TestCase):              '/tmp/mocked_kubeconfig',          ] +        mock_oc_binary.side_effect = [ +            'oc' +        ] +          # Act          results = OCObjectValidator.run_ansible(params) diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 7edf141e5..adeb85c3f 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -2155,6 +2155,10 @@ class OpenShiftFacts(object):                          nfs=dict(                              directory='/exports',                              options='*(rw,root_squash)'), +                        glusterfs=dict( +                            endpoints='glusterfs-registry-endpoints', +                            path='glusterfs-registry-volume', +                            readOnly=False),                          host=None,                          access=dict(                              modes=['ReadWriteMany'] diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py new file mode 100644 index 000000000..c2792a0fe --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -0,0 +1,65 @@ +# pylint: disable=missing-docstring +from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var +from openshift_checks.mixins import NotContainerizedMixin + + +class DiskAvailability(NotContainerizedMixin, OpenShiftCheck): +    """Check that recommended disk space is available before a first-time install.""" + +    name = "disk_availability" +    tags = ["preflight"] + +    # Values taken from the official installation documentation: +    # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements +    recommended_disk_space_bytes = { +        "masters": 40 * 10**9, +        "nodes": 15 * 10**9, +        "etcd": 20 * 10**9, +    } + +    @classmethod +    def is_active(cls, task_vars): +        """Skip hosts that do not have recommended disk space requirements.""" +        group_names = get_var(task_vars, "group_names", default=[]) +        has_disk_space_recommendation = bool(set(group_names).intersection(cls.recommended_disk_space_bytes)) +        return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation + +    def run(self, tmp, task_vars): +        group_names = get_var(task_vars, "group_names") +        ansible_mounts = get_var(task_vars, "ansible_mounts") + +        min_free_bytes = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names) +        free_bytes = self.openshift_available_disk(ansible_mounts) + +        if free_bytes < min_free_bytes: +            return { +                'failed': True, +                'msg': ( 
+                    'Available disk space ({:.1f} GB) for the volume containing ' +                    '"/var" is below minimum recommended space ({:.1f} GB)' +                ).format(float(free_bytes) / 10**9, float(min_free_bytes) / 10**9) +            } + +        return {} + +    @staticmethod +    def openshift_available_disk(ansible_mounts): +        """Determine the available disk space for an OpenShift installation. + +        ansible_mounts should be a list of dicts like the 'setup' Ansible module +        returns. +        """ +        # priority list in descending order +        supported_mnt_paths = ["/var", "/"] +        available_mnts = {mnt.get("mount"): mnt for mnt in ansible_mounts} + +        try: +            for path in supported_mnt_paths: +                if path in available_mnts: +                    return available_mnts[path]["size_available"] +        except KeyError: +            pass + +        paths = ''.join(sorted(available_mnts)) or 'none' +        msg = "Unable to determine available disk space. Paths mounted: {}.".format(paths) +        raise OpenShiftCheckException(msg) diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py new file mode 100644 index 000000000..28805dc37 --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py @@ -0,0 +1,44 @@ +# pylint: disable=missing-docstring +from openshift_checks import OpenShiftCheck, get_var + + +class MemoryAvailability(OpenShiftCheck): +    """Check that recommended memory is available.""" + +    name = "memory_availability" +    tags = ["preflight"] + +    # Values taken from the official installation documentation: +    # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements +    recommended_memory_bytes = { +        "masters": 16 * 10**9, +        "nodes": 8 * 10**9, +        "etcd": 20 * 10**9, +    } + +    @classmethod +    def is_active(cls, task_vars): +        """Skip hosts that do not have recommended memory requirements.""" +        group_names = get_var(task_vars, "group_names", default=[]) +        has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes)) +        return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation + +    def run(self, tmp, task_vars): +        group_names = get_var(task_vars, "group_names") +        total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6 + +        min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names) + +        if total_memory_bytes < min_memory_bytes: +            return { +                'failed': True, +                'msg': ( +                    'Available memory ({available:.1f} GB) ' +                    'below recommended value ({recommended:.1f} GB)' +                ).format( +                    available=float(total_memory_bytes) / 10**9, +                    recommended=float(min_memory_bytes) / 10**9, +                ), +            } + +        return {} diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py index 657e15160..20d160eaf 100644 --- a/roles/openshift_health_checker/openshift_checks/mixins.py +++ b/roles/openshift_health_checker/openshift_checks/mixins.py @@ -1,4 +1,8 @@ -# pylint: disable=missing-docstring +# pylint: 
disable=missing-docstring,too-few-public-methods +""" +Mixin classes meant to be used with subclasses of OpenShiftCheck. +""" +  from openshift_checks import get_var @@ -7,12 +11,5 @@ class NotContainerizedMixin(object):      @classmethod      def is_active(cls, task_vars): -        return ( -            # This mixin is meant to be used with subclasses of OpenShiftCheck. -            super(NotContainerizedMixin, cls).is_active(task_vars) and -            not cls.is_containerized(task_vars) -        ) - -    @staticmethod -    def is_containerized(task_vars): -        return get_var(task_vars, "openshift", "common", "is_containerized") +        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized") +        return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py index a877246f4..2693ae37b 100644 --- a/roles/openshift_health_checker/test/action_plugin_test.py +++ b/roles/openshift_health_checker/test/action_plugin_test.py @@ -1,5 +1,7 @@  import pytest +from ansible.playbook.play_context import PlayContext +  from openshift_health_check import ActionModule, resolve_checks  from openshift_checks import OpenShiftCheckException @@ -34,7 +36,7 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru  @pytest.fixture  def plugin():      task = FakeTask('openshift_health_check', {'checks': ['fake_check']}) -    plugin = ActionModule(task, None, None, None, None, None) +    plugin = ActionModule(task, None, PlayContext(), None, None, None)      return plugin diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py new file mode 100644 index 000000000..970b474d7 --- /dev/null +++ b/roles/openshift_health_checker/test/disk_availability_test.py @@ -0,0 +1,155 @@ +import pytest + +from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException + + +@pytest.mark.parametrize('group_names,is_containerized,is_active', [ +    (['masters'], False, True), +    # ensure check is skipped on containerized installs +    (['masters'], True, False), +    (['nodes'], False, True), +    (['etcd'], False, True), +    (['masters', 'nodes'], False, True), +    (['masters', 'etcd'], False, True), +    ([], False, False), +    (['lb'], False, False), +    (['nfs'], False, False), +]) +def test_is_active(group_names, is_containerized, is_active): +    task_vars = dict( +        group_names=group_names, +        openshift=dict(common=dict(is_containerized=is_containerized)), +    ) +    assert DiskAvailability.is_active(task_vars=task_vars) == is_active + + +@pytest.mark.parametrize('ansible_mounts,extra_words', [ +    ([], ['none']),  # empty ansible_mounts +    ([{'mount': '/mnt'}], ['/mnt']),  # missing relevant mount paths +    ([{'mount': '/var'}], ['/var']),  # missing size_available +]) +def test_cannot_determine_available_disk(ansible_mounts, extra_words): +    task_vars = dict( +        group_names=['masters'], +        ansible_mounts=ansible_mounts, +    ) +    check = DiskAvailability(execute_module=fake_execute_module) + +    with pytest.raises(OpenShiftCheckException) as excinfo: +        check.run(tmp=None, task_vars=task_vars) + +    for word in 'determine available disk'.split() + extra_words: +        assert word in str(excinfo.value) + + 
+@pytest.mark.parametrize('group_names,ansible_mounts', [ +    ( +        ['masters'], +        [{ +            'mount': '/', +            'size_available': 40 * 10**9 + 1, +        }], +    ), +    ( +        ['nodes'], +        [{ +            'mount': '/', +            'size_available': 15 * 10**9 + 1, +        }], +    ), +    ( +        ['etcd'], +        [{ +            'mount': '/', +            'size_available': 20 * 10**9 + 1, +        }], +    ), +    ( +        ['etcd'], +        [{ +            # not enough space on / ... +            'mount': '/', +            'size_available': 0, +        }, { +            # ... but enough on /var +            'mount': '/var', +            'size_available': 20 * 10**9 + 1, +        }], +    ), +]) +def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts): +    task_vars = dict( +        group_names=group_names, +        ansible_mounts=ansible_mounts, +    ) + +    check = DiskAvailability(execute_module=fake_execute_module) +    result = check.run(tmp=None, task_vars=task_vars) + +    assert not result.get('failed', False) + + +@pytest.mark.parametrize('group_names,ansible_mounts,extra_words', [ +    ( +        ['masters'], +        [{ +            'mount': '/', +            'size_available': 1, +        }], +        ['0.0 GB'], +    ), +    ( +        ['nodes'], +        [{ +            'mount': '/', +            'size_available': 1 * 10**9, +        }], +        ['1.0 GB'], +    ), +    ( +        ['etcd'], +        [{ +            'mount': '/', +            'size_available': 1, +        }], +        ['0.0 GB'], +    ), +    ( +        ['nodes', 'masters'], +        [{ +            'mount': '/', +            # enough space for a node, not enough for a master +            'size_available': 15 * 10**9 + 1, +        }], +        ['15.0 GB'], +    ), +    ( +        ['etcd'], +        [{ +            # enough space on / ... +            'mount': '/', +            'size_available': 20 * 10**9 + 1, +        }, { +            # .. 
but not enough on /var +            'mount': '/var', +            'size_available': 0, +        }], +        ['0.0 GB'], +    ), +]) +def test_fails_with_insufficient_disk_space(group_names, ansible_mounts, extra_words): +    task_vars = dict( +        group_names=group_names, +        ansible_mounts=ansible_mounts, +    ) + +    check = DiskAvailability(execute_module=fake_execute_module) +    result = check.run(tmp=None, task_vars=task_vars) + +    assert result['failed'] +    for word in 'below recommended'.split() + extra_words: +        assert word in result['msg'] + + +def fake_execute_module(*args): +    raise AssertionError('this function should not be called') diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py new file mode 100644 index 000000000..e161a5b9e --- /dev/null +++ b/roles/openshift_health_checker/test/memory_availability_test.py @@ -0,0 +1,91 @@ +import pytest + +from openshift_checks.memory_availability import MemoryAvailability + + +@pytest.mark.parametrize('group_names,is_active', [ +    (['masters'], True), +    (['nodes'], True), +    (['etcd'], True), +    (['masters', 'nodes'], True), +    (['masters', 'etcd'], True), +    ([], False), +    (['lb'], False), +    (['nfs'], False), +]) +def test_is_active(group_names, is_active): +    task_vars = dict( +        group_names=group_names, +    ) +    assert MemoryAvailability.is_active(task_vars=task_vars) == is_active + + +@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [ +    ( +        ['masters'], +        17200, +    ), +    ( +        ['nodes'], +        8200, +    ), +    ( +        ['etcd'], +        22200, +    ), +    ( +        ['masters', 'nodes'], +        17000, +    ), +]) +def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb): +    task_vars = dict( +        group_names=group_names, +        ansible_memtotal_mb=ansible_memtotal_mb, +    ) + +    check = MemoryAvailability(execute_module=fake_execute_module) +    result = check.run(tmp=None, task_vars=task_vars) + +    assert not result.get('failed', False) + + +@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [ +    ( +        ['masters'], +        0, +        ['0.0 GB'], +    ), +    ( +        ['nodes'], +        100, +        ['0.1 GB'], +    ), +    ( +        ['etcd'], +        -1, +        ['0.0 GB'], +    ), +    ( +        ['nodes', 'masters'], +        # enough memory for a node, not enough for a master +        11000, +        ['11.0 GB'], +    ), +]) +def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words): +    task_vars = dict( +        group_names=group_names, +        ansible_memtotal_mb=ansible_memtotal_mb, +    ) + +    check = MemoryAvailability(execute_module=fake_execute_module) +    result = check.run(tmp=None, task_vars=task_vars) + +    assert result['failed'] +    for word in 'below recommended'.split() + extra_words: +        assert word in result['msg'] + + +def fake_execute_module(*args): +    raise AssertionError('this function should not be called') diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 0b8042473..6e691c26f 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -109,7 +109,7 @@        type: persistentVolumeClaim        claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"    
when: -  - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack'] +  - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']  - name: Create OpenShift registry    oc_adm_registry: @@ -123,3 +123,7 @@      volume_mounts: "{{ openshift_hosted_registry_volumes }}"      edits: "{{ openshift_hosted_registry_edits }}"      force: "{{ True|bool in openshift_hosted_registry_force }}" + +- include: storage/glusterfs.yml +  when: +  - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml new file mode 100644 index 000000000..b18b24266 --- /dev/null +++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml @@ -0,0 +1,51 @@ +--- +- name: Wait for registry pods +  oc_obj: +    namespace: "{{ openshift_hosted_registry_namespace }}" +    state: list +    kind: pod +    selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}" +  register: registry_pods +  until: +  - "registry_pods.results.results[0]['items'] | count > 0" +  # There must be as many matching pods with 'Ready' status True as there are expected replicas +  - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int" +  delay: 10 +  retries: "{{ (600 / 10) | int }}" + +- name: Determine registry fsGroup +  set_fact: +    openshift_hosted_registry_fsgroup: "{{ registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}" + +- name: Create temp mount directory +  command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX +  register: mktemp +  changed_when: False +  check_mode: no + +- name: Mount registry volume +  mount: +    state: mounted +    fstype: glusterfs +    src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" +    name: "{{ mktemp.stdout }}" + +- name: Set registry volume permissions +  file: +    dest: "{{ mktemp.stdout }}" +    state: directory +    group: "{{ openshift_hosted_registry_fsgroup }}" +    mode: "2775" +    recurse: True + +- name: Unmount registry volume +  mount: +    state: unmounted +    name: "{{ mktemp.stdout }}" + +- name: Delete temp mount directory +  file: +    dest: "{{ mktemp.stdout }}" +    state: absent +  changed_when: False +  check_mode: no diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml index c67058696..5abb2ef83 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml @@ -223,7 +223,7 @@ items:    -      description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"'      name: IMAGE_VERSION -    value: "3.4.0" +    value: "v3.4"    -      description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."      
name: IMAGE_PULL_SECRET diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml index 6ead122c5..1d319eab8 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml @@ -105,7 +105,7 @@ parameters:  -    description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'    name: IMAGE_VERSION -  value: "3.4.0" +  value: "v3.4"  -    description: "Internal URL for the master, for authentication retrieval"    name: MASTER_URL @@ -118,7 +118,7 @@ parameters:    description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"    name: MODE    value: "deploy" --  +-    description: "Set to true to continue even if the deployer runs into an error."    name: CONTINUE_ON_ERROR    value: "false" diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml deleted file mode 100644 index fdfc285ca..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml +++ /dev/null @@ -1,345 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- -  apiVersion: "v1" -  kind: "Template" -  metadata: -    name: logging-deployer-account-template -    annotations: -      description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
-      tags: "infrastructure" -  objects: -  - -    apiVersion: v1 -    kind: ServiceAccount -    name: logging-deployer -    metadata: -      name: logging-deployer -      labels: -        logging-infra: deployer -        provider: openshift -        component: deployer -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-kibana -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-elasticsearch -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-fluentd -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-curator -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: oauth-editor -    rules: -    - resources: -      - oauthclients -      verbs: -      - create -      - delete -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: daemonset-admin -    rules: -    - resources: -      - daemonsets -      apiGroups: -      - extensions -      verbs: -      - create -      - get -      - list -      - watch -      - delete -      - update -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: rolebinding-reader -    rules: -    - resources: -      - clusterrolebindings -      verbs: -      - get -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-deployer-edit-role -    roleRef: -      kind: ClusterRole -      name: edit -    subjects: -    - kind: ServiceAccount -      name: logging-deployer -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-deployer-dsadmin-role -    roleRef: -      kind: ClusterRole -      name: daemonset-admin -    subjects: -    - kind: ServiceAccount -      name: logging-deployer -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-elasticsearch-view-role -    roleRef: -      kind: ClusterRole -      name: view -    subjects: -    - kind: ServiceAccount -      name: aggregated-logging-elasticsearch -- -  apiVersion: "v1" -  kind: "Template" -  metadata: -    name: logging-deployer-template -    annotations: -      description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
-      tags: "infrastructure" -  labels: -    logging-infra: deployer -    provider: openshift -  objects: -  - -    apiVersion: v1 -    kind: Pod -    metadata: -      generateName: logging-deployer- -    spec: -      containers: -      - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION} -        imagePullPolicy: Always -        name: deployer -        volumeMounts: -        - name: empty -          mountPath: /etc/deploy -        env: -          - name: PROJECT -            valueFrom: -              fieldRef: -                fieldPath: metadata.namespace -          - name: IMAGE_PREFIX -            value: ${IMAGE_PREFIX} -          - name: IMAGE_VERSION -            value: ${IMAGE_VERSION} -          - name: IMAGE_PULL_SECRET -            value: ${IMAGE_PULL_SECRET} -          - name: INSECURE_REGISTRY -            value: ${INSECURE_REGISTRY} -          - name: ENABLE_OPS_CLUSTER -            value: ${ENABLE_OPS_CLUSTER} -          - name: KIBANA_HOSTNAME -            value: ${KIBANA_HOSTNAME} -          - name: KIBANA_OPS_HOSTNAME -            value: ${KIBANA_OPS_HOSTNAME} -          - name: PUBLIC_MASTER_URL -            value: ${PUBLIC_MASTER_URL} -          - name: MASTER_URL -            value: ${MASTER_URL} -          - name: ES_INSTANCE_RAM -            value: ${ES_INSTANCE_RAM} -          - name: ES_PVC_SIZE -            value: ${ES_PVC_SIZE} -          - name: ES_PVC_PREFIX -            value: ${ES_PVC_PREFIX} -          - name: ES_PVC_DYNAMIC -            value: ${ES_PVC_DYNAMIC} -          - name: ES_CLUSTER_SIZE -            value: ${ES_CLUSTER_SIZE} -          - name: ES_NODE_QUORUM -            value: ${ES_NODE_QUORUM} -          - name: ES_RECOVER_AFTER_NODES -            value: ${ES_RECOVER_AFTER_NODES} -          - name: ES_RECOVER_EXPECTED_NODES -            value: ${ES_RECOVER_EXPECTED_NODES} -          - name: ES_RECOVER_AFTER_TIME -            value: ${ES_RECOVER_AFTER_TIME} -          - name: ES_OPS_INSTANCE_RAM -            value: ${ES_OPS_INSTANCE_RAM} -          - name: ES_OPS_PVC_SIZE -            value: ${ES_OPS_PVC_SIZE} -          - name: ES_OPS_PVC_PREFIX -            value: ${ES_OPS_PVC_PREFIX} -          - name: ES_OPS_PVC_DYNAMIC -            value: ${ES_OPS_PVC_DYNAMIC} -          - name: ES_OPS_CLUSTER_SIZE -            value: ${ES_OPS_CLUSTER_SIZE} -          - name: ES_OPS_NODE_QUORUM -            value: ${ES_OPS_NODE_QUORUM} -          - name: ES_OPS_RECOVER_AFTER_NODES -            value: ${ES_OPS_RECOVER_AFTER_NODES} -          - name: ES_OPS_RECOVER_EXPECTED_NODES -            value: ${ES_OPS_RECOVER_EXPECTED_NODES} -          - name: ES_OPS_RECOVER_AFTER_TIME -            value: ${ES_OPS_RECOVER_AFTER_TIME} -          - name: FLUENTD_NODESELECTOR -            value: ${FLUENTD_NODESELECTOR} -          - name: ES_NODESELECTOR -            value: ${ES_NODESELECTOR} -          - name: ES_OPS_NODESELECTOR -            value: ${ES_OPS_NODESELECTOR} -          - name: KIBANA_NODESELECTOR -            value: ${KIBANA_NODESELECTOR} -          - name: KIBANA_OPS_NODESELECTOR -            value: ${KIBANA_OPS_NODESELECTOR} -          - name: CURATOR_NODESELECTOR -            value: ${CURATOR_NODESELECTOR} -          - name: CURATOR_OPS_NODESELECTOR -            value: ${CURATOR_OPS_NODESELECTOR} -          - name: MODE -            value: ${MODE} -      dnsPolicy: ClusterFirst -      restartPolicy: Never -      serviceAccount: logging-deployer -      volumes: -      - name: empty -        emptyDir: {} -  parameters: -  - -    description: "The mode 
that the deployer runs in." -    name: MODE -    value: "install" -  - -    description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' -    name: IMAGE_PREFIX -    value: "registry.access.redhat.com/openshift3/" -  - -    description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' -    name: IMAGE_VERSION -    value: "3.4.0" -  - -    description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." -    name: IMAGE_PULL_SECRET -  - -    description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" -    name: INSECURE_REGISTRY -    value: "false" -  - -    description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." -    name: ENABLE_OPS_CLUSTER -    value: "false" -  - -    description: "(Deprecated) External hostname where clients will reach kibana" -    name: KIBANA_HOSTNAME -    value: "kibana.example.com" -  - -    description: "(Deprecated) External hostname at which admins will visit the ops Kibana." -    name: KIBANA_OPS_HOSTNAME -    value: kibana-ops.example.com -  - -    description: "(Deprecated) External URL for the master, for OAuth purposes" -    name: PUBLIC_MASTER_URL -    value: "https://localhost:8443" -  - -    description: "(Deprecated) Internal URL for the master, for authentication retrieval" -    name: MASTER_URL -    value: "https://kubernetes.default.svc.cluster.local" -  - -    description: "(Deprecated) How many instances of ElasticSearch to deploy." -    name: ES_CLUSTER_SIZE -    value: "1" -  - -    description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." -    name: ES_INSTANCE_RAM -    value: "8G" -  - -    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." -    name: ES_PVC_SIZE -  - -    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." -    name: ES_PVC_PREFIX -    value: "logging-es-" -  - -    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' -    name: ES_PVC_DYNAMIC -  - -    description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." -    name: ES_NODE_QUORUM -  - -    description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." -    name: ES_RECOVER_AFTER_NODES -  - -    description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." -    name: ES_RECOVER_EXPECTED_NODES -  - -    description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." -    name: ES_RECOVER_AFTER_TIME -    value: "5m" -  - -    description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." 
-    name: ES_OPS_CLUSTER_SIZE -  - -    description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." -    name: ES_OPS_INSTANCE_RAM -    value: "8G" -  - -    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." -    name: ES_OPS_PVC_SIZE -  - -    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." -    name: ES_OPS_PVC_PREFIX -    value: "logging-es-ops-" -  - -    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. ' -    name: ES_OPS_PVC_DYNAMIC -  - -    description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." -    name: ES_OPS_NODE_QUORUM -  - -    description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." -    name: ES_OPS_RECOVER_AFTER_NODES -  - -    description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." -    name: ES_OPS_RECOVER_EXPECTED_NODES -  - -    description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." -    name: ES_OPS_RECOVER_AFTER_TIME -    value: "5m" -  - -    description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." -    name: FLUENTD_NODESELECTOR -    value: "logging-infra-fluentd=true" -  - -    description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." -    name: ES_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." -    name: ES_OPS_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Kibana cluster (label=value)." -    name: KIBANA_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Kibana operations cluster (label=value)." -    name: KIBANA_OPS_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Curator (label=value)." -    name: CURATOR_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector operations Curator (label=value)." -    name: CURATOR_OPS_NODESELECTOR -    value: "" diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml deleted file mode 100644 index c4ab794ae..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: -  name: metrics-deployer-template -  annotations: -    description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." -    tags: "infrastructure" -labels: -  metrics-infra: deployer -  provider: openshift -  component: deployer -objects: -- -  apiVersion: v1 -  kind: Pod -  metadata: -    generateName: metrics-deployer- -  spec: -    securityContext: {} -    containers: -    - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} -      name: deployer -      securityContext: {} -      volumeMounts: -      - name: secret -        mountPath: /secret -        readOnly: true -      - name: empty -        mountPath: /etc/deploy -      env: -        - name: PROJECT -          valueFrom: -            fieldRef: -              fieldPath: metadata.namespace -        - name: POD_NAME -          valueFrom: -            fieldRef: -              fieldPath: metadata.name -        - name: IMAGE_PREFIX -          value: ${IMAGE_PREFIX} -        - name: IMAGE_VERSION -          value: ${IMAGE_VERSION} -        - name: MASTER_URL -          value: ${MASTER_URL} -        - name: MODE -          value: ${MODE} -        - name: CONTINUE_ON_ERROR -          value: ${CONTINUE_ON_ERROR} -        - name: REDEPLOY -          value: ${REDEPLOY} -        - name: IGNORE_PREFLIGHT -          value: ${IGNORE_PREFLIGHT} -        - name: USE_PERSISTENT_STORAGE -          value: ${USE_PERSISTENT_STORAGE} -        - name: DYNAMICALLY_PROVISION_STORAGE -          value: ${DYNAMICALLY_PROVISION_STORAGE} -        - name: HAWKULAR_METRICS_HOSTNAME -          value: ${HAWKULAR_METRICS_HOSTNAME} -        - name: CASSANDRA_NODES -          value: ${CASSANDRA_NODES} -        - name: CASSANDRA_PV_SIZE -          value: ${CASSANDRA_PV_SIZE} -        - name: METRIC_DURATION -          value: ${METRIC_DURATION} -        - name: USER_WRITE_ACCESS -          value: ${USER_WRITE_ACCESS} -        - name: HEAPSTER_NODE_ID -          value: ${HEAPSTER_NODE_ID} -        - name: METRIC_RESOLUTION -          value: ${METRIC_RESOLUTION} -        - name: STARTUP_TIMEOUT -          value: ${STARTUP_TIMEOUT} -    dnsPolicy: ClusterFirst -    restartPolicy: Never -    serviceAccount: metrics-deployer -    volumes: -    - name: empty -      emptyDir: {} -    - name: secret -      secret: -        secretName: metrics-deployer -parameters: -- -  description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' -  name: IMAGE_PREFIX -  value: "registry.access.redhat.com/openshift3/" -- -  description: 'Specify version for metrics components; e.g. 
for "openshift/origin-metrics-deployer:latest", set version "latest"' -  name: IMAGE_VERSION -  value: "v3.5" -- -  description: "Internal URL for the master, for authentication retrieval" -  name: MASTER_URL -  value: "https://kubernetes.default.svc:443" -- -  description: "External hostname where clients will reach Hawkular Metrics" -  name: HAWKULAR_METRICS_HOSTNAME -  required: true -- -  description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" -  name: MODE -  value: "deploy" --  -  description: "Set to true to continue even if the deployer runs into an error." -  name: CONTINUE_ON_ERROR -  value: "false" -- -  description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" -  name: REDEPLOY -  value: "false" -- -  description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." -  name: IGNORE_PREFLIGHT -  value: "false" -- -  description: "Set to true for persistent storage, set to false to use non persistent storage" -  name: USE_PERSISTENT_STORAGE -  value: "true" -- -  description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" -  name: DYNAMICALLY_PROVISION_STORAGE -  value: "false" -- -  description: "The number of Cassandra Nodes to deploy for the initial cluster" -  name: CASSANDRA_NODES -  value: "1" -- -  description: "The persistent volume size for each of the Cassandra nodes" -  name: CASSANDRA_PV_SIZE -  value: "10Gi" -- -  description: "How many days metrics should be stored for." -  name: METRIC_DURATION -  value: "7" -- -  description: "If a user accounts should be allowed to write metrics." -  name: USER_WRITE_ACCESS -  value: "false" -- -  description: "The identifier used when generating metric ids in Hawkular" -  name: HEAPSTER_NODE_ID -  value: "nodename" -- -  description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" -  name: METRIC_RESOLUTION -  value: "30s" -- -  description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" -  name: STARTUP_TIMEOUT -  value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml deleted file mode 100644 index 5b5503500..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml +++ /dev/null @@ -1,342 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- -  apiVersion: "v1" -  kind: "Template" -  metadata: -    name: logging-deployer-account-template -    annotations: -      description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
-      tags: "infrastructure" -  objects: -  - -    apiVersion: v1 -    kind: ServiceAccount -    name: logging-deployer -    metadata: -      name: logging-deployer -      labels: -        logging-infra: deployer -        provider: openshift -        component: deployer -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-kibana -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-elasticsearch -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-fluentd -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-curator -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: oauth-editor -    rules: -    - resources: -      - oauthclients -      verbs: -      - create -      - delete -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: daemonset-admin -    rules: -    - resources: -      - daemonsets -      apiGroups: -      - extensions -      verbs: -      - create -      - get -      - list -      - watch -      - delete -      - update -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: rolebinding-reader -    rules: -    - resources: -      - clusterrolebindings -      verbs: -      - get -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-deployer-edit-role -    roleRef: -      name: edit -    subjects: -    - kind: ServiceAccount -      name: logging-deployer -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-deployer-dsadmin-role -    roleRef: -      name: daemonset-admin -    subjects: -    - kind: ServiceAccount -      name: logging-deployer -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-elasticsearch-view-role -    roleRef: -      name: view -    subjects: -    - kind: ServiceAccount -      name: aggregated-logging-elasticsearch -- -  apiVersion: "v1" -  kind: "Template" -  metadata: -    name: logging-deployer-template -    annotations: -      description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
-      tags: "infrastructure" -  labels: -    logging-infra: deployer -    provider: openshift -  objects: -  - -    apiVersion: v1 -    kind: Pod -    metadata: -      generateName: logging-deployer- -    spec: -      containers: -      - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION} -        imagePullPolicy: Always -        name: deployer -        volumeMounts: -        - name: empty -          mountPath: /etc/deploy -        env: -          - name: PROJECT -            valueFrom: -              fieldRef: -                fieldPath: metadata.namespace -          - name: IMAGE_PREFIX -            value: ${IMAGE_PREFIX} -          - name: IMAGE_VERSION -            value: ${IMAGE_VERSION} -          - name: IMAGE_PULL_SECRET -            value: ${IMAGE_PULL_SECRET} -          - name: INSECURE_REGISTRY -            value: ${INSECURE_REGISTRY} -          - name: ENABLE_OPS_CLUSTER -            value: ${ENABLE_OPS_CLUSTER} -          - name: KIBANA_HOSTNAME -            value: ${KIBANA_HOSTNAME} -          - name: KIBANA_OPS_HOSTNAME -            value: ${KIBANA_OPS_HOSTNAME} -          - name: PUBLIC_MASTER_URL -            value: ${PUBLIC_MASTER_URL} -          - name: MASTER_URL -            value: ${MASTER_URL} -          - name: ES_INSTANCE_RAM -            value: ${ES_INSTANCE_RAM} -          - name: ES_PVC_SIZE -            value: ${ES_PVC_SIZE} -          - name: ES_PVC_PREFIX -            value: ${ES_PVC_PREFIX} -          - name: ES_PVC_DYNAMIC -            value: ${ES_PVC_DYNAMIC} -          - name: ES_CLUSTER_SIZE -            value: ${ES_CLUSTER_SIZE} -          - name: ES_NODE_QUORUM -            value: ${ES_NODE_QUORUM} -          - name: ES_RECOVER_AFTER_NODES -            value: ${ES_RECOVER_AFTER_NODES} -          - name: ES_RECOVER_EXPECTED_NODES -            value: ${ES_RECOVER_EXPECTED_NODES} -          - name: ES_RECOVER_AFTER_TIME -            value: ${ES_RECOVER_AFTER_TIME} -          - name: ES_OPS_INSTANCE_RAM -            value: ${ES_OPS_INSTANCE_RAM} -          - name: ES_OPS_PVC_SIZE -            value: ${ES_OPS_PVC_SIZE} -          - name: ES_OPS_PVC_PREFIX -            value: ${ES_OPS_PVC_PREFIX} -          - name: ES_OPS_PVC_DYNAMIC -            value: ${ES_OPS_PVC_DYNAMIC} -          - name: ES_OPS_CLUSTER_SIZE -            value: ${ES_OPS_CLUSTER_SIZE} -          - name: ES_OPS_NODE_QUORUM -            value: ${ES_OPS_NODE_QUORUM} -          - name: ES_OPS_RECOVER_AFTER_NODES -            value: ${ES_OPS_RECOVER_AFTER_NODES} -          - name: ES_OPS_RECOVER_EXPECTED_NODES -            value: ${ES_OPS_RECOVER_EXPECTED_NODES} -          - name: ES_OPS_RECOVER_AFTER_TIME -            value: ${ES_OPS_RECOVER_AFTER_TIME} -          - name: FLUENTD_NODESELECTOR -            value: ${FLUENTD_NODESELECTOR} -          - name: ES_NODESELECTOR -            value: ${ES_NODESELECTOR} -          - name: ES_OPS_NODESELECTOR -            value: ${ES_OPS_NODESELECTOR} -          - name: KIBANA_NODESELECTOR -            value: ${KIBANA_NODESELECTOR} -          - name: KIBANA_OPS_NODESELECTOR -            value: ${KIBANA_OPS_NODESELECTOR} -          - name: CURATOR_NODESELECTOR -            value: ${CURATOR_NODESELECTOR} -          - name: CURATOR_OPS_NODESELECTOR -            value: ${CURATOR_OPS_NODESELECTOR} -          - name: MODE -            value: ${MODE} -      dnsPolicy: ClusterFirst -      restartPolicy: Never -      serviceAccount: logging-deployer -      volumes: -      - name: empty -        emptyDir: {} -  parameters: -  - -    description: "The mode 
that the deployer runs in." -    name: MODE -    value: "install" -  - -    description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' -    name: IMAGE_PREFIX -    value: "docker.io/openshift/origin-" -  - -    description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' -    name: IMAGE_VERSION -    value: "latest" -  - -    description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." -    name: IMAGE_PULL_SECRET -  - -    description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" -    name: INSECURE_REGISTRY -    value: "false" -  - -    description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." -    name: ENABLE_OPS_CLUSTER -    value: "false" -  - -    description: "(Deprecated) External hostname where clients will reach kibana" -    name: KIBANA_HOSTNAME -    value: "kibana.example.com" -  - -    description: "(Deprecated) External hostname at which admins will visit the ops Kibana." -    name: KIBANA_OPS_HOSTNAME -    value: kibana-ops.example.com -  - -    description: "(Deprecated) External URL for the master, for OAuth purposes" -    name: PUBLIC_MASTER_URL -    value: "https://localhost:8443" -  - -    description: "(Deprecated) Internal URL for the master, for authentication retrieval" -    name: MASTER_URL -    value: "https://kubernetes.default.svc.cluster.local" -  - -    description: "(Deprecated) How many instances of ElasticSearch to deploy." -    name: ES_CLUSTER_SIZE -    value: "1" -  - -    description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." -    name: ES_INSTANCE_RAM -    value: "8G" -  - -    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." -    name: ES_PVC_SIZE -  - -    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." -    name: ES_PVC_PREFIX -    value: "logging-es-" -  - -    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' -    name: ES_PVC_DYNAMIC -  - -    description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." -    name: ES_NODE_QUORUM -  - -    description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." -    name: ES_RECOVER_AFTER_NODES -  - -    description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." -    name: ES_RECOVER_EXPECTED_NODES -  - -    description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." -    name: ES_RECOVER_AFTER_TIME -    value: "5m" -  - -    description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." 
-    name: ES_OPS_CLUSTER_SIZE -  - -    description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." -    name: ES_OPS_INSTANCE_RAM -    value: "8G" -  - -    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." -    name: ES_OPS_PVC_SIZE -  - -    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." -    name: ES_OPS_PVC_PREFIX -    value: "logging-es-ops-" -  - -    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. ' -    name: ES_OPS_PVC_DYNAMIC -  - -    description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." -    name: ES_OPS_NODE_QUORUM -  - -    description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." -    name: ES_OPS_RECOVER_AFTER_NODES -  - -    description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." -    name: ES_OPS_RECOVER_EXPECTED_NODES -  - -    description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." -    name: ES_OPS_RECOVER_AFTER_TIME -    value: "5m" -  - -    description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." -    name: FLUENTD_NODESELECTOR -    value: "logging-infra-fluentd=true" -  - -    description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." -    name: ES_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." -    name: ES_OPS_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Kibana cluster (label=value)." -    name: KIBANA_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Kibana operations cluster (label=value)." -    name: KIBANA_OPS_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Curator (label=value)." -    name: CURATOR_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector operations Curator (label=value)." -    name: CURATOR_OPS_NODESELECTOR -    value: "" diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml deleted file mode 100644 index d191c0439..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: -  name: metrics-deployer-template -  annotations: -    description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." -    tags: "infrastructure" -labels: -  metrics-infra: deployer -  provider: openshift -  component: deployer -objects: -- -  apiVersion: v1 -  kind: Pod -  metadata: -    generateName: metrics-deployer- -  spec: -    securityContext: {} -    containers: -    - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} -      name: deployer -      securityContext: {} -      volumeMounts: -      - name: secret -        mountPath: /secret -        readOnly: true -      - name: empty -        mountPath: /etc/deploy -      env: -        - name: PROJECT -          valueFrom: -            fieldRef: -              fieldPath: metadata.namespace -        - name: POD_NAME -          valueFrom: -            fieldRef: -              fieldPath: metadata.name -        - name: IMAGE_PREFIX -          value: ${IMAGE_PREFIX} -        - name: IMAGE_VERSION -          value: ${IMAGE_VERSION} -        - name: MASTER_URL -          value: ${MASTER_URL} -        - name: MODE -          value: ${MODE} -        - name: CONTINUE_ON_ERROR -          value: ${CONTINUE_ON_ERROR} -        - name: REDEPLOY -          value: ${REDEPLOY} -        - name: IGNORE_PREFLIGHT -          value: ${IGNORE_PREFLIGHT} -        - name: USE_PERSISTENT_STORAGE -          value: ${USE_PERSISTENT_STORAGE} -        - name: DYNAMICALLY_PROVISION_STORAGE -          value: ${DYNAMICALLY_PROVISION_STORAGE} -        - name: HAWKULAR_METRICS_HOSTNAME -          value: ${HAWKULAR_METRICS_HOSTNAME} -        - name: CASSANDRA_NODES -          value: ${CASSANDRA_NODES} -        - name: CASSANDRA_PV_SIZE -          value: ${CASSANDRA_PV_SIZE} -        - name: METRIC_DURATION -          value: ${METRIC_DURATION} -        - name: USER_WRITE_ACCESS -          value: ${USER_WRITE_ACCESS} -        - name: HEAPSTER_NODE_ID -          value: ${HEAPSTER_NODE_ID} -        - name: METRIC_RESOLUTION -          value: ${METRIC_RESOLUTION} -        - name: STARTUP_TIMEOUT -          value: ${STARTUP_TIMEOUT} -    dnsPolicy: ClusterFirst -    restartPolicy: Never -    serviceAccount: metrics-deployer -    volumes: -    - name: empty -      emptyDir: {} -    - name: secret -      secret: -        secretName: metrics-deployer -parameters: -- -  description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' -  name: IMAGE_PREFIX -  value: "openshift/origin-" -- -  description: 'Specify version for metrics components; e.g. 
for "openshift/origin-metrics-deployer:latest", set version "latest"' -  name: IMAGE_VERSION -  value: "latest" -- -  description: "Internal URL for the master, for authentication retrieval" -  name: MASTER_URL -  value: "https://kubernetes.default.svc:443" -- -  description: "External hostname where clients will reach Hawkular Metrics" -  name: HAWKULAR_METRICS_HOSTNAME -  required: true -- -  description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" -  name: MODE -  value: "deploy" --  -  description: "Set to true to continue even if the deployer runs into an error." -  name: CONTINUE_ON_ERROR -  value: "false" -- -  description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" -  name: REDEPLOY -  value: "false" -- -  description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." -  name: IGNORE_PREFLIGHT -  value: "false" -- -  description: "Set to true for persistent storage, set to false to use non persistent storage" -  name: USE_PERSISTENT_STORAGE -  value: "true" -- -  description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" -  name: DYNAMICALLY_PROVISION_STORAGE -  value: "false" -- -  description: "The number of Cassandra Nodes to deploy for the initial cluster" -  name: CASSANDRA_NODES -  value: "1" -- -  description: "The persistent volume size for each of the Cassandra nodes" -  name: CASSANDRA_PV_SIZE -  value: "10Gi" -- -  description: "How many days metrics should be stored for." -  name: METRIC_DURATION -  value: "7" -- -  description: "If a user accounts should be allowed to write metrics." -  name: USER_WRITE_ACCESS -  value: "false" -- -  description: "The identifier used when generating metric ids in Hawkular" -  name: HEAPSTER_NODE_ID -  value: "nodename" -- -  description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" -  name: METRIC_RESOLUTION -  value: "30s" -- -  description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" -  name: STARTUP_TIMEOUT -  value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml deleted file mode 100644 index fdfc285ca..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml +++ /dev/null @@ -1,345 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- -  apiVersion: "v1" -  kind: "Template" -  metadata: -    name: logging-deployer-account-template -    annotations: -      description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
-      tags: "infrastructure" -  objects: -  - -    apiVersion: v1 -    kind: ServiceAccount -    name: logging-deployer -    metadata: -      name: logging-deployer -      labels: -        logging-infra: deployer -        provider: openshift -        component: deployer -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-kibana -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-elasticsearch -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-fluentd -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-curator -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: oauth-editor -    rules: -    - resources: -      - oauthclients -      verbs: -      - create -      - delete -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: daemonset-admin -    rules: -    - resources: -      - daemonsets -      apiGroups: -      - extensions -      verbs: -      - create -      - get -      - list -      - watch -      - delete -      - update -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: rolebinding-reader -    rules: -    - resources: -      - clusterrolebindings -      verbs: -      - get -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-deployer-edit-role -    roleRef: -      kind: ClusterRole -      name: edit -    subjects: -    - kind: ServiceAccount -      name: logging-deployer -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-deployer-dsadmin-role -    roleRef: -      kind: ClusterRole -      name: daemonset-admin -    subjects: -    - kind: ServiceAccount -      name: logging-deployer -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-elasticsearch-view-role -    roleRef: -      kind: ClusterRole -      name: view -    subjects: -    - kind: ServiceAccount -      name: aggregated-logging-elasticsearch -- -  apiVersion: "v1" -  kind: "Template" -  metadata: -    name: logging-deployer-template -    annotations: -      description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
-      tags: "infrastructure" -  labels: -    logging-infra: deployer -    provider: openshift -  objects: -  - -    apiVersion: v1 -    kind: Pod -    metadata: -      generateName: logging-deployer- -    spec: -      containers: -      - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION} -        imagePullPolicy: Always -        name: deployer -        volumeMounts: -        - name: empty -          mountPath: /etc/deploy -        env: -          - name: PROJECT -            valueFrom: -              fieldRef: -                fieldPath: metadata.namespace -          - name: IMAGE_PREFIX -            value: ${IMAGE_PREFIX} -          - name: IMAGE_VERSION -            value: ${IMAGE_VERSION} -          - name: IMAGE_PULL_SECRET -            value: ${IMAGE_PULL_SECRET} -          - name: INSECURE_REGISTRY -            value: ${INSECURE_REGISTRY} -          - name: ENABLE_OPS_CLUSTER -            value: ${ENABLE_OPS_CLUSTER} -          - name: KIBANA_HOSTNAME -            value: ${KIBANA_HOSTNAME} -          - name: KIBANA_OPS_HOSTNAME -            value: ${KIBANA_OPS_HOSTNAME} -          - name: PUBLIC_MASTER_URL -            value: ${PUBLIC_MASTER_URL} -          - name: MASTER_URL -            value: ${MASTER_URL} -          - name: ES_INSTANCE_RAM -            value: ${ES_INSTANCE_RAM} -          - name: ES_PVC_SIZE -            value: ${ES_PVC_SIZE} -          - name: ES_PVC_PREFIX -            value: ${ES_PVC_PREFIX} -          - name: ES_PVC_DYNAMIC -            value: ${ES_PVC_DYNAMIC} -          - name: ES_CLUSTER_SIZE -            value: ${ES_CLUSTER_SIZE} -          - name: ES_NODE_QUORUM -            value: ${ES_NODE_QUORUM} -          - name: ES_RECOVER_AFTER_NODES -            value: ${ES_RECOVER_AFTER_NODES} -          - name: ES_RECOVER_EXPECTED_NODES -            value: ${ES_RECOVER_EXPECTED_NODES} -          - name: ES_RECOVER_AFTER_TIME -            value: ${ES_RECOVER_AFTER_TIME} -          - name: ES_OPS_INSTANCE_RAM -            value: ${ES_OPS_INSTANCE_RAM} -          - name: ES_OPS_PVC_SIZE -            value: ${ES_OPS_PVC_SIZE} -          - name: ES_OPS_PVC_PREFIX -            value: ${ES_OPS_PVC_PREFIX} -          - name: ES_OPS_PVC_DYNAMIC -            value: ${ES_OPS_PVC_DYNAMIC} -          - name: ES_OPS_CLUSTER_SIZE -            value: ${ES_OPS_CLUSTER_SIZE} -          - name: ES_OPS_NODE_QUORUM -            value: ${ES_OPS_NODE_QUORUM} -          - name: ES_OPS_RECOVER_AFTER_NODES -            value: ${ES_OPS_RECOVER_AFTER_NODES} -          - name: ES_OPS_RECOVER_EXPECTED_NODES -            value: ${ES_OPS_RECOVER_EXPECTED_NODES} -          - name: ES_OPS_RECOVER_AFTER_TIME -            value: ${ES_OPS_RECOVER_AFTER_TIME} -          - name: FLUENTD_NODESELECTOR -            value: ${FLUENTD_NODESELECTOR} -          - name: ES_NODESELECTOR -            value: ${ES_NODESELECTOR} -          - name: ES_OPS_NODESELECTOR -            value: ${ES_OPS_NODESELECTOR} -          - name: KIBANA_NODESELECTOR -            value: ${KIBANA_NODESELECTOR} -          - name: KIBANA_OPS_NODESELECTOR -            value: ${KIBANA_OPS_NODESELECTOR} -          - name: CURATOR_NODESELECTOR -            value: ${CURATOR_NODESELECTOR} -          - name: CURATOR_OPS_NODESELECTOR -            value: ${CURATOR_OPS_NODESELECTOR} -          - name: MODE -            value: ${MODE} -      dnsPolicy: ClusterFirst -      restartPolicy: Never -      serviceAccount: logging-deployer -      volumes: -      - name: empty -        emptyDir: {} -  parameters: -  - -    description: "The mode 
that the deployer runs in." -    name: MODE -    value: "install" -  - -    description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' -    name: IMAGE_PREFIX -    value: "registry.access.redhat.com/openshift3/" -  - -    description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' -    name: IMAGE_VERSION -    value: "3.4.0" -  - -    description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." -    name: IMAGE_PULL_SECRET -  - -    description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" -    name: INSECURE_REGISTRY -    value: "false" -  - -    description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." -    name: ENABLE_OPS_CLUSTER -    value: "false" -  - -    description: "(Deprecated) External hostname where clients will reach kibana" -    name: KIBANA_HOSTNAME -    value: "kibana.example.com" -  - -    description: "(Deprecated) External hostname at which admins will visit the ops Kibana." -    name: KIBANA_OPS_HOSTNAME -    value: kibana-ops.example.com -  - -    description: "(Deprecated) External URL for the master, for OAuth purposes" -    name: PUBLIC_MASTER_URL -    value: "https://localhost:8443" -  - -    description: "(Deprecated) Internal URL for the master, for authentication retrieval" -    name: MASTER_URL -    value: "https://kubernetes.default.svc.cluster.local" -  - -    description: "(Deprecated) How many instances of ElasticSearch to deploy." -    name: ES_CLUSTER_SIZE -    value: "1" -  - -    description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." -    name: ES_INSTANCE_RAM -    value: "8G" -  - -    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." -    name: ES_PVC_SIZE -  - -    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." -    name: ES_PVC_PREFIX -    value: "logging-es-" -  - -    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' -    name: ES_PVC_DYNAMIC -  - -    description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." -    name: ES_NODE_QUORUM -  - -    description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." -    name: ES_RECOVER_AFTER_NODES -  - -    description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." -    name: ES_RECOVER_EXPECTED_NODES -  - -    description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." -    name: ES_RECOVER_AFTER_TIME -    value: "5m" -  - -    description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." 
-    name: ES_OPS_CLUSTER_SIZE -  - -    description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." -    name: ES_OPS_INSTANCE_RAM -    value: "8G" -  - -    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." -    name: ES_OPS_PVC_SIZE -  - -    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." -    name: ES_OPS_PVC_PREFIX -    value: "logging-es-ops-" -  - -    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. ' -    name: ES_OPS_PVC_DYNAMIC -  - -    description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." -    name: ES_OPS_NODE_QUORUM -  - -    description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." -    name: ES_OPS_RECOVER_AFTER_NODES -  - -    description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." -    name: ES_OPS_RECOVER_EXPECTED_NODES -  - -    description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." -    name: ES_OPS_RECOVER_AFTER_TIME -    value: "5m" -  - -    description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." -    name: FLUENTD_NODESELECTOR -    value: "logging-infra-fluentd=true" -  - -    description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." -    name: ES_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." -    name: ES_OPS_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Kibana cluster (label=value)." -    name: KIBANA_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Kibana operations cluster (label=value)." -    name: KIBANA_OPS_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Curator (label=value)." -    name: CURATOR_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector operations Curator (label=value)." -    name: CURATOR_OPS_NODESELECTOR -    value: "" diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml deleted file mode 100644 index c4ab794ae..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: -  name: metrics-deployer-template -  annotations: -    description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." -    tags: "infrastructure" -labels: -  metrics-infra: deployer -  provider: openshift -  component: deployer -objects: -- -  apiVersion: v1 -  kind: Pod -  metadata: -    generateName: metrics-deployer- -  spec: -    securityContext: {} -    containers: -    - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} -      name: deployer -      securityContext: {} -      volumeMounts: -      - name: secret -        mountPath: /secret -        readOnly: true -      - name: empty -        mountPath: /etc/deploy -      env: -        - name: PROJECT -          valueFrom: -            fieldRef: -              fieldPath: metadata.namespace -        - name: POD_NAME -          valueFrom: -            fieldRef: -              fieldPath: metadata.name -        - name: IMAGE_PREFIX -          value: ${IMAGE_PREFIX} -        - name: IMAGE_VERSION -          value: ${IMAGE_VERSION} -        - name: MASTER_URL -          value: ${MASTER_URL} -        - name: MODE -          value: ${MODE} -        - name: CONTINUE_ON_ERROR -          value: ${CONTINUE_ON_ERROR} -        - name: REDEPLOY -          value: ${REDEPLOY} -        - name: IGNORE_PREFLIGHT -          value: ${IGNORE_PREFLIGHT} -        - name: USE_PERSISTENT_STORAGE -          value: ${USE_PERSISTENT_STORAGE} -        - name: DYNAMICALLY_PROVISION_STORAGE -          value: ${DYNAMICALLY_PROVISION_STORAGE} -        - name: HAWKULAR_METRICS_HOSTNAME -          value: ${HAWKULAR_METRICS_HOSTNAME} -        - name: CASSANDRA_NODES -          value: ${CASSANDRA_NODES} -        - name: CASSANDRA_PV_SIZE -          value: ${CASSANDRA_PV_SIZE} -        - name: METRIC_DURATION -          value: ${METRIC_DURATION} -        - name: USER_WRITE_ACCESS -          value: ${USER_WRITE_ACCESS} -        - name: HEAPSTER_NODE_ID -          value: ${HEAPSTER_NODE_ID} -        - name: METRIC_RESOLUTION -          value: ${METRIC_RESOLUTION} -        - name: STARTUP_TIMEOUT -          value: ${STARTUP_TIMEOUT} -    dnsPolicy: ClusterFirst -    restartPolicy: Never -    serviceAccount: metrics-deployer -    volumes: -    - name: empty -      emptyDir: {} -    - name: secret -      secret: -        secretName: metrics-deployer -parameters: -- -  description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' -  name: IMAGE_PREFIX -  value: "registry.access.redhat.com/openshift3/" -- -  description: 'Specify version for metrics components; e.g. 
for "openshift/origin-metrics-deployer:latest", set version "latest"' -  name: IMAGE_VERSION -  value: "v3.5" -- -  description: "Internal URL for the master, for authentication retrieval" -  name: MASTER_URL -  value: "https://kubernetes.default.svc:443" -- -  description: "External hostname where clients will reach Hawkular Metrics" -  name: HAWKULAR_METRICS_HOSTNAME -  required: true -- -  description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" -  name: MODE -  value: "deploy" --  -  description: "Set to true to continue even if the deployer runs into an error." -  name: CONTINUE_ON_ERROR -  value: "false" -- -  description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" -  name: REDEPLOY -  value: "false" -- -  description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." -  name: IGNORE_PREFLIGHT -  value: "false" -- -  description: "Set to true for persistent storage, set to false to use non persistent storage" -  name: USE_PERSISTENT_STORAGE -  value: "true" -- -  description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" -  name: DYNAMICALLY_PROVISION_STORAGE -  value: "false" -- -  description: "The number of Cassandra Nodes to deploy for the initial cluster" -  name: CASSANDRA_NODES -  value: "1" -- -  description: "The persistent volume size for each of the Cassandra nodes" -  name: CASSANDRA_PV_SIZE -  value: "10Gi" -- -  description: "How many days metrics should be stored for." -  name: METRIC_DURATION -  value: "7" -- -  description: "If a user accounts should be allowed to write metrics." -  name: USER_WRITE_ACCESS -  value: "false" -- -  description: "The identifier used when generating metric ids in Hawkular" -  name: HEAPSTER_NODE_ID -  value: "nodename" -- -  description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" -  name: METRIC_RESOLUTION -  value: "30s" -- -  description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" -  name: STARTUP_TIMEOUT -  value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml deleted file mode 100644 index 5b5503500..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml +++ /dev/null @@ -1,342 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- -  apiVersion: "v1" -  kind: "Template" -  metadata: -    name: logging-deployer-account-template -    annotations: -      description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
-      tags: "infrastructure" -  objects: -  - -    apiVersion: v1 -    kind: ServiceAccount -    name: logging-deployer -    metadata: -      name: logging-deployer -      labels: -        logging-infra: deployer -        provider: openshift -        component: deployer -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-kibana -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-elasticsearch -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-fluentd -  - -    apiVersion: v1 -    kind: ServiceAccount -    metadata: -      name: aggregated-logging-curator -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: oauth-editor -    rules: -    - resources: -      - oauthclients -      verbs: -      - create -      - delete -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: daemonset-admin -    rules: -    - resources: -      - daemonsets -      apiGroups: -      - extensions -      verbs: -      - create -      - get -      - list -      - watch -      - delete -      - update -  - apiVersion: v1 -    kind: ClusterRole -    metadata: -      name: rolebinding-reader -    rules: -    - resources: -      - clusterrolebindings -      verbs: -      - get -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-deployer-edit-role -    roleRef: -      name: edit -    subjects: -    - kind: ServiceAccount -      name: logging-deployer -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-deployer-dsadmin-role -    roleRef: -      name: daemonset-admin -    subjects: -    - kind: ServiceAccount -      name: logging-deployer -  - -    apiVersion: v1 -    kind: RoleBinding -    metadata: -      name: logging-elasticsearch-view-role -    roleRef: -      name: view -    subjects: -    - kind: ServiceAccount -      name: aggregated-logging-elasticsearch -- -  apiVersion: "v1" -  kind: "Template" -  metadata: -    name: logging-deployer-template -    annotations: -      description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
-      tags: "infrastructure" -  labels: -    logging-infra: deployer -    provider: openshift -  objects: -  - -    apiVersion: v1 -    kind: Pod -    metadata: -      generateName: logging-deployer- -    spec: -      containers: -      - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION} -        imagePullPolicy: Always -        name: deployer -        volumeMounts: -        - name: empty -          mountPath: /etc/deploy -        env: -          - name: PROJECT -            valueFrom: -              fieldRef: -                fieldPath: metadata.namespace -          - name: IMAGE_PREFIX -            value: ${IMAGE_PREFIX} -          - name: IMAGE_VERSION -            value: ${IMAGE_VERSION} -          - name: IMAGE_PULL_SECRET -            value: ${IMAGE_PULL_SECRET} -          - name: INSECURE_REGISTRY -            value: ${INSECURE_REGISTRY} -          - name: ENABLE_OPS_CLUSTER -            value: ${ENABLE_OPS_CLUSTER} -          - name: KIBANA_HOSTNAME -            value: ${KIBANA_HOSTNAME} -          - name: KIBANA_OPS_HOSTNAME -            value: ${KIBANA_OPS_HOSTNAME} -          - name: PUBLIC_MASTER_URL -            value: ${PUBLIC_MASTER_URL} -          - name: MASTER_URL -            value: ${MASTER_URL} -          - name: ES_INSTANCE_RAM -            value: ${ES_INSTANCE_RAM} -          - name: ES_PVC_SIZE -            value: ${ES_PVC_SIZE} -          - name: ES_PVC_PREFIX -            value: ${ES_PVC_PREFIX} -          - name: ES_PVC_DYNAMIC -            value: ${ES_PVC_DYNAMIC} -          - name: ES_CLUSTER_SIZE -            value: ${ES_CLUSTER_SIZE} -          - name: ES_NODE_QUORUM -            value: ${ES_NODE_QUORUM} -          - name: ES_RECOVER_AFTER_NODES -            value: ${ES_RECOVER_AFTER_NODES} -          - name: ES_RECOVER_EXPECTED_NODES -            value: ${ES_RECOVER_EXPECTED_NODES} -          - name: ES_RECOVER_AFTER_TIME -            value: ${ES_RECOVER_AFTER_TIME} -          - name: ES_OPS_INSTANCE_RAM -            value: ${ES_OPS_INSTANCE_RAM} -          - name: ES_OPS_PVC_SIZE -            value: ${ES_OPS_PVC_SIZE} -          - name: ES_OPS_PVC_PREFIX -            value: ${ES_OPS_PVC_PREFIX} -          - name: ES_OPS_PVC_DYNAMIC -            value: ${ES_OPS_PVC_DYNAMIC} -          - name: ES_OPS_CLUSTER_SIZE -            value: ${ES_OPS_CLUSTER_SIZE} -          - name: ES_OPS_NODE_QUORUM -            value: ${ES_OPS_NODE_QUORUM} -          - name: ES_OPS_RECOVER_AFTER_NODES -            value: ${ES_OPS_RECOVER_AFTER_NODES} -          - name: ES_OPS_RECOVER_EXPECTED_NODES -            value: ${ES_OPS_RECOVER_EXPECTED_NODES} -          - name: ES_OPS_RECOVER_AFTER_TIME -            value: ${ES_OPS_RECOVER_AFTER_TIME} -          - name: FLUENTD_NODESELECTOR -            value: ${FLUENTD_NODESELECTOR} -          - name: ES_NODESELECTOR -            value: ${ES_NODESELECTOR} -          - name: ES_OPS_NODESELECTOR -            value: ${ES_OPS_NODESELECTOR} -          - name: KIBANA_NODESELECTOR -            value: ${KIBANA_NODESELECTOR} -          - name: KIBANA_OPS_NODESELECTOR -            value: ${KIBANA_OPS_NODESELECTOR} -          - name: CURATOR_NODESELECTOR -            value: ${CURATOR_NODESELECTOR} -          - name: CURATOR_OPS_NODESELECTOR -            value: ${CURATOR_OPS_NODESELECTOR} -          - name: MODE -            value: ${MODE} -      dnsPolicy: ClusterFirst -      restartPolicy: Never -      serviceAccount: logging-deployer -      volumes: -      - name: empty -        emptyDir: {} -  parameters: -  - -    description: "The mode 
that the deployer runs in." -    name: MODE -    value: "install" -  - -    description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' -    name: IMAGE_PREFIX -    value: "docker.io/openshift/origin-" -  - -    description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' -    name: IMAGE_VERSION -    value: "latest" -  - -    description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." -    name: IMAGE_PULL_SECRET -  - -    description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" -    name: INSECURE_REGISTRY -    value: "false" -  - -    description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." -    name: ENABLE_OPS_CLUSTER -    value: "false" -  - -    description: "(Deprecated) External hostname where clients will reach kibana" -    name: KIBANA_HOSTNAME -    value: "kibana.example.com" -  - -    description: "(Deprecated) External hostname at which admins will visit the ops Kibana." -    name: KIBANA_OPS_HOSTNAME -    value: kibana-ops.example.com -  - -    description: "(Deprecated) External URL for the master, for OAuth purposes" -    name: PUBLIC_MASTER_URL -    value: "https://localhost:8443" -  - -    description: "(Deprecated) Internal URL for the master, for authentication retrieval" -    name: MASTER_URL -    value: "https://kubernetes.default.svc.cluster.local" -  - -    description: "(Deprecated) How many instances of ElasticSearch to deploy." -    name: ES_CLUSTER_SIZE -    value: "1" -  - -    description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." -    name: ES_INSTANCE_RAM -    value: "8G" -  - -    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." -    name: ES_PVC_SIZE -  - -    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." -    name: ES_PVC_PREFIX -    value: "logging-es-" -  - -    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' -    name: ES_PVC_DYNAMIC -  - -    description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." -    name: ES_NODE_QUORUM -  - -    description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." -    name: ES_RECOVER_AFTER_NODES -  - -    description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." -    name: ES_RECOVER_EXPECTED_NODES -  - -    description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." -    name: ES_RECOVER_AFTER_TIME -    value: "5m" -  - -    description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." 
-    name: ES_OPS_CLUSTER_SIZE -  - -    description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." -    name: ES_OPS_INSTANCE_RAM -    value: "8G" -  - -    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." -    name: ES_OPS_PVC_SIZE -  - -    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." -    name: ES_OPS_PVC_PREFIX -    value: "logging-es-ops-" -  - -    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. ' -    name: ES_OPS_PVC_DYNAMIC -  - -    description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." -    name: ES_OPS_NODE_QUORUM -  - -    description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." -    name: ES_OPS_RECOVER_AFTER_NODES -  - -    description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." -    name: ES_OPS_RECOVER_EXPECTED_NODES -  - -    description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." -    name: ES_OPS_RECOVER_AFTER_TIME -    value: "5m" -  - -    description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." -    name: FLUENTD_NODESELECTOR -    value: "logging-infra-fluentd=true" -  - -    description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." -    name: ES_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." -    name: ES_OPS_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Kibana cluster (label=value)." -    name: KIBANA_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Kibana operations cluster (label=value)." -    name: KIBANA_OPS_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector Curator (label=value)." -    name: CURATOR_NODESELECTOR -    value: "" -  - -    description: "(Deprecated) Node selector operations Curator (label=value)." -    name: CURATOR_OPS_NODESELECTOR -    value: "" diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml deleted file mode 100644 index d191c0439..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: -  name: metrics-deployer-template -  annotations: -    description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." -    tags: "infrastructure" -labels: -  metrics-infra: deployer -  provider: openshift -  component: deployer -objects: -- -  apiVersion: v1 -  kind: Pod -  metadata: -    generateName: metrics-deployer- -  spec: -    securityContext: {} -    containers: -    - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} -      name: deployer -      securityContext: {} -      volumeMounts: -      - name: secret -        mountPath: /secret -        readOnly: true -      - name: empty -        mountPath: /etc/deploy -      env: -        - name: PROJECT -          valueFrom: -            fieldRef: -              fieldPath: metadata.namespace -        - name: POD_NAME -          valueFrom: -            fieldRef: -              fieldPath: metadata.name -        - name: IMAGE_PREFIX -          value: ${IMAGE_PREFIX} -        - name: IMAGE_VERSION -          value: ${IMAGE_VERSION} -        - name: MASTER_URL -          value: ${MASTER_URL} -        - name: MODE -          value: ${MODE} -        - name: CONTINUE_ON_ERROR -          value: ${CONTINUE_ON_ERROR} -        - name: REDEPLOY -          value: ${REDEPLOY} -        - name: IGNORE_PREFLIGHT -          value: ${IGNORE_PREFLIGHT} -        - name: USE_PERSISTENT_STORAGE -          value: ${USE_PERSISTENT_STORAGE} -        - name: DYNAMICALLY_PROVISION_STORAGE -          value: ${DYNAMICALLY_PROVISION_STORAGE} -        - name: HAWKULAR_METRICS_HOSTNAME -          value: ${HAWKULAR_METRICS_HOSTNAME} -        - name: CASSANDRA_NODES -          value: ${CASSANDRA_NODES} -        - name: CASSANDRA_PV_SIZE -          value: ${CASSANDRA_PV_SIZE} -        - name: METRIC_DURATION -          value: ${METRIC_DURATION} -        - name: USER_WRITE_ACCESS -          value: ${USER_WRITE_ACCESS} -        - name: HEAPSTER_NODE_ID -          value: ${HEAPSTER_NODE_ID} -        - name: METRIC_RESOLUTION -          value: ${METRIC_RESOLUTION} -        - name: STARTUP_TIMEOUT -          value: ${STARTUP_TIMEOUT} -    dnsPolicy: ClusterFirst -    restartPolicy: Never -    serviceAccount: metrics-deployer -    volumes: -    - name: empty -      emptyDir: {} -    - name: secret -      secret: -        secretName: metrics-deployer -parameters: -- -  description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' -  name: IMAGE_PREFIX -  value: "openshift/origin-" -- -  description: 'Specify version for metrics components; e.g. 
for "openshift/origin-metrics-deployer:latest", set version "latest"' -  name: IMAGE_VERSION -  value: "latest" -- -  description: "Internal URL for the master, for authentication retrieval" -  name: MASTER_URL -  value: "https://kubernetes.default.svc:443" -- -  description: "External hostname where clients will reach Hawkular Metrics" -  name: HAWKULAR_METRICS_HOSTNAME -  required: true -- -  description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" -  name: MODE -  value: "deploy" --  -  description: "Set to true to continue even if the deployer runs into an error." -  name: CONTINUE_ON_ERROR -  value: "false" -- -  description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" -  name: REDEPLOY -  value: "false" -- -  description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." -  name: IGNORE_PREFLIGHT -  value: "false" -- -  description: "Set to true for persistent storage, set to false to use non persistent storage" -  name: USE_PERSISTENT_STORAGE -  value: "true" -- -  description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" -  name: DYNAMICALLY_PROVISION_STORAGE -  value: "false" -- -  description: "The number of Cassandra Nodes to deploy for the initial cluster" -  name: CASSANDRA_NODES -  value: "1" -- -  description: "The persistent volume size for each of the Cassandra nodes" -  name: CASSANDRA_PV_SIZE -  value: "10Gi" -- -  description: "How many days metrics should be stored for." -  name: METRIC_DURATION -  value: "7" -- -  description: "If a user accounts should be allowed to write metrics." -  name: USER_WRITE_ACCESS -  value: "false" -- -  description: "The identifier used when generating metric ids in Hawkular" -  name: HEAPSTER_NODE_ID -  value: "nodename" -- -  description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" -  name: METRIC_RESOLUTION -  value: "30s" -- -  description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" -  name: STARTUP_TIMEOUT -  value: "500" diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 96ed44011..5ee8d1e2a 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -119,6 +119,15 @@ openshift_logging_es_ops_number_of_replicas: 0  # storage related defaults  openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}" +# mux - secure_forward listener service +openshift_logging_mux_allow_external: False +openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" +# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch +openshift_logging_use_mux_client: False +openshift_logging_mux_hostname: "{{ 'mux.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_mux_port: 24284 +openshift_logging_mux_cpu_limit: 100m +openshift_logging_mux_memory_limit: 512Mi  # following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly  #es_logging_contents: @@ -127,3 +136,5 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc  #fluentd_config_contents:  #fluentd_throttle_contents:  #fluentd_secureforward_contents: +#fluentd_mux_config_contents: +#fluentd_mux_secureforward_contents: diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 188ea246c..2f5b68b4d 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -44,6 +44,7 @@      - logging-kibana      - logging-kibana-proxy      - logging-curator +    - logging-mux    ignore_errors: yes    register: delete_result    changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 @@ -109,5 +110,6 @@      - logging-curator      - logging-elasticsearch      - logging-fluentd +    - logging-mux    register: delete_result    changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 740e490e1..b34df018d 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -45,6 +45,21 @@      - procure_component: kibana-internal        hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}" +- include: procure_server_certs.yaml +  loop_control: +    loop_var: cert_info +  with_items: +    - procure_component: mux +      hostnames: "logging-mux, {{openshift_logging_mux_hostname}}" +  when: openshift_logging_use_mux + +- include: procure_shared_key.yaml +  loop_control: +    loop_var: shared_key_info +  with_items: +    - procure_component: mux +  when: openshift_logging_use_mux +  - name: Copy proxy TLS configuration file    copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json    when: server_tls_json is undefined @@ -85,6 +100,14 @@    loop_control:      loop_var: node_name +- name: Generate PEM cert for mux +  include: generate_pems.yaml component={{node_name}} +  with_items: +    - system.logging.mux +  loop_control: +    loop_var: node_name +  when: openshift_logging_use_mux +  - name: Creating necessary JKS certs    include: generate_jks.yaml diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml index 253543f54..44bd0058a 100644 --- a/roles/openshift_logging/tasks/generate_configmaps.yaml +++ b/roles/openshift_logging/tasks/generate_configmaps.yaml @@ -134,3 +134,43 @@        when: fluentd_configmap.stdout is defined        changed_when: no    check_mode: no + +- block: +    - copy: +        src: fluent.conf +        dest: "{{mktemp.stdout}}/fluent-mux.conf" +      when: fluentd_mux_config_contents is undefined +      changed_when: no + +    - copy: +        src: secure-forward.conf +        dest: "{{mktemp.stdout}}/secure-forward-mux.conf" +      when: fluentd_mux_securefoward_contents is undefined +      changed_when: no + +    - copy: +        content: "{{fluentd_mux_config_contents}}" 
+        dest: "{{mktemp.stdout}}/fluent-mux.conf" +      when: fluentd_mux_config_contents is defined +      changed_when: no + +    - copy: +        content: "{{fluentd_mux_secureforward_contents}}" +        dest: "{{mktemp.stdout}}/secure-forward-mux.conf" +      when: fluentd_mux_secureforward_contents is defined +      changed_when: no + +    - command: > +        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux +        --from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf +        --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run +      register: mux_configmap +      changed_when: no + +    - copy: +        content: "{{mux_configmap.stdout}}" +        dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml" +      when: mux_configmap.stdout is defined +      changed_when: no +  check_mode: no +  when: openshift_logging_use_mux diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml index 124040a4a..c1da49fd8 100644 --- a/roles/openshift_logging/tasks/generate_secrets.yaml +++ b/roles/openshift_logging/tasks/generate_secrets.yaml @@ -34,6 +34,36 @@    check_mode: no    changed_when: no +- name: Retrieving the cert to use when generating secrets for mux +  slurp: src="{{generated_certs_dir}}/{{item.file}}" +  register: mux_key_pairs +  with_items: +    - { name: "ca_file", file: "ca.crt" } +    - { name: "mux_key", file: "system.logging.mux.key"} +    - { name: "mux_cert", file: "system.logging.mux.crt"} +    - { name: "mux_shared_key", file: "mux_shared_key"} +  when: openshift_logging_use_mux + +- name: Generating secrets for mux +  template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml +  vars: +    secret_name: "logging-{{component}}" +    secret_key_file: "{{component}}_key" +    secret_cert_file: "{{component}}_cert" +    secrets: +      - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} +      - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} +      - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} +      - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"} +    secret_keys: ["ca", "cert", "key", "shared_key"] +  with_items: +    - mux +  loop_control: +    loop_var: component +  check_mode: no +  changed_when: no +  when: openshift_logging_use_mux +  - name: Generating secrets for kibana proxy    template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml    vars: diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml index 5091c1209..e3a5c5eb3 100644 --- a/roles/openshift_logging/tasks/generate_services.yaml +++ b/roles/openshift_logging/tasks/generate_services.yaml @@ -85,3 +85,35 @@    when: openshift_logging_use_ops | bool    check_mode: no    changed_when: no + +- name: Generating logging-mux service for external connections +  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml +  vars: +    obj_name: logging-mux +    ports: +    - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward} +    labels: +      logging-infra: support +    selector: +      provider: openshift +      component: mux +    externalIPs: +    - "{{ ansible_eth0.ipv4.address }}" + 
 check_mode: no +  changed_when: no +  when: openshift_logging_mux_allow_external + +- name: Generating logging-mux service for intra-cluster connections +  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml +  vars: +    obj_name: logging-mux +    ports: +    - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward} +    labels: +      logging-infra: support +    selector: +      provider: openshift +      component: mux +  check_mode: no +  changed_when: no +  when: openshift_logging_use_mux and not openshift_logging_mux_allow_external diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 83b68fa77..aec455c22 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -27,6 +27,10 @@    loop_control:      loop_var: install_component +- name: Install logging mux +  include: "{{ role_path }}/tasks/install_mux.yaml" +  when: openshift_logging_use_mux +  - find: paths={{ mktemp.stdout }}/templates patterns=*.yaml    register: object_def_files    changed_when: no diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml new file mode 100644 index 000000000..296da626f --- /dev/null +++ b/roles/openshift_logging/tasks/install_mux.yaml @@ -0,0 +1,67 @@ +--- +- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }} +  check_mode: no + +- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }} +  check_mode: no + +- name: Check mux current replica count +  command: > +    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux +    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} +  register: mux_replica_count +  when: not ansible_check_mode +  ignore_errors: yes +  changed_when: no + +- name: Generating mux deploymentconfig +  template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml +  vars: +    component: mux +    logging_component: mux +    deploy_name: "logging-{{component}}" +    image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}" +    es_host: logging-es +    es_port: "{{openshift_logging_es_port}}" +    ops_host: "{{ mux_ops_host }}" +    ops_port: "{{ mux_ops_port }}" +    mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}" +    mux_memory_limit: "{{openshift_logging_mux_memory_limit}}" +    replicas: "{{mux_replica_count.stdout | default (0)}}" +    mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}" +  check_mode: no +  changed_when: no + +- name: "Check mux hostmount-anyuid permissions" +  command: > +    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig +    get scc/hostmount-anyuid -o jsonpath='{.users}' +  register: mux_hostmount_anyuid +  check_mode: no +  changed_when: no + +- name: "Set hostmount-anyuid permissions for mux" +  command: > +    {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy +    add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd +  register: mux_output +  failed_when: "mux_output.rc == 1 and 'exists' not in mux_output.stderr" +  check_mode: no +  when: 
mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 + +- name: "Check mux cluster-reader permissions" +  command: > +    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig +    get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}' +  register: mux_cluster_reader +  check_mode: no +  changed_when: no + +- name: "Set cluster-reader permissions for mux" +  command: > +    {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy +    add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd +  register: mux2_output +  failed_when: "mux2_output.rc == 1 and 'exists' not in mux2_output.stderr" +  check_mode: no +  when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 diff --git a/roles/openshift_logging/tasks/procure_shared_key.yaml b/roles/openshift_logging/tasks/procure_shared_key.yaml new file mode 100644 index 000000000..056ff6b98 --- /dev/null +++ b/roles/openshift_logging/tasks/procure_shared_key.yaml @@ -0,0 +1,25 @@ +--- +- name: Checking for {{ shared_key_info.procure_component }}_shared_key +  stat: path="{{generated_certs_dir}}/{{ shared_key_info.procure_component }}_shared_key" +  register: component_shared_key_file +  check_mode: no + +- name: Trying to discover shared key variable name for {{ shared_key_info.procure_component }} +  set_fact: procure_component_shared_key={{ lookup('env', '{{shared_key_info.procure_component}}' + '_shared_key') }} +  when: +  - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined +  check_mode: no + +- name: Creating shared_key for {{ shared_key_info.procure_component }} +  copy: content="{{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}" +        dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key" +  check_mode: no +  when: +  - not component_shared_key_file.stat.exists + +- name: Copying shared key for {{ shared_key_info.procure_component }} to generated certs directory +  copy: content="{{procure_component_shared_key}}" dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key" +  check_mode: no +  when: +  - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined +  - not component_shared_key_file.stat.exists diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml index edbb62c3e..1042b3daa 100644 --- a/roles/openshift_logging/tasks/start_cluster.yaml +++ b/roles/openshift_logging/tasks/start_cluster.yaml @@ -21,6 +21,26 @@    loop_control:      loop_var: fluentd_host +- name: Retrieve mux +  oc_obj: +    state: list +    kind: dc +    selector: "component=mux" +    namespace: "{{openshift_logging_namespace}}" +  register: mux_dc +  when: openshift_logging_use_mux + +- name: start mux +  oc_scale: +    kind: dc +    name: "{{ object }}" +    namespace: "{{openshift_logging_namespace}}" +    replicas: "{{ openshift_logging_mux_replica_count | default (1) }}" +  with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" +  loop_control: +    loop_var: object +  when: openshift_logging_use_mux +  - name: Retrieve elasticsearch    oc_obj:      state: list diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml 
b/roles/openshift_logging/tasks/stop_cluster.yaml index 4b3722e29..d20c57cc1 100644 --- a/roles/openshift_logging/tasks/stop_cluster.yaml +++ b/roles/openshift_logging/tasks/stop_cluster.yaml @@ -21,6 +21,26 @@    loop_control:      loop_var: fluentd_host +- name: Retrieve mux +  oc_obj: +    state: list +    kind: dc +    selector: "component=mux" +    namespace: "{{openshift_logging_namespace}}" +  register: mux_dc +  when: openshift_logging_use_mux + +- name: stop mux +  oc_scale: +    kind: dc +    name: "{{ object }}" +    namespace: "{{openshift_logging_namespace}}" +    replicas: 0 +  with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" +  loop_control: +    loop_var: object +  when: openshift_logging_use_mux +  - name: Retrieve elasticsearch    oc_obj:      state: list diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2 index 16185fc1d..f89855bf5 100644 --- a/roles/openshift_logging/templates/es.j2 +++ b/roles/openshift_logging/templates/es.j2 @@ -95,6 +95,13 @@ spec:                readOnly: true              - name: elasticsearch-storage                mountPath: /elasticsearch/persistent +          readinessProbe: +            exec: +              command: +              - "/usr/share/elasticsearch/probe/readiness.sh" +            initialDelaySeconds: 5 +            timeoutSeconds: 4 +            periodSeconds: 5        volumes:          - name: elasticsearch            secret: diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2 index 0bf1686ad..d13691259 100644 --- a/roles/openshift_logging/templates/fluentd.j2 +++ b/roles/openshift_logging/templates/fluentd.j2 @@ -59,6 +59,11 @@ spec:          - name: dockercfg            mountPath: /etc/sysconfig/docker            readOnly: true +{% if openshift_logging_use_mux_client | bool %} +        - name: muxcerts +          mountPath: /etc/fluent/muxkeys +          readOnly: true +{% endif %}          env:          - name: "K8S_HOST_URL"            value: "{{openshift_logging_master_url}}" @@ -122,6 +127,8 @@ spec:            value: "{{openshift_logging_fluentd_journal_source | default('')}}"          - name: "JOURNAL_READ_FROM_HEAD"            value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" +        - name: "USE_MUX_CLIENT" +          value: "{{openshift_logging_use_mux_client| default('false')}}"        volumes:        - name: runlogjournal          hostPath: @@ -147,3 +154,8 @@ spec:        - name: dockercfg          hostPath:            path: /etc/sysconfig/docker +{% if openshift_logging_use_mux_client | bool %} +      - name: muxcerts +        secret: +          secretName: logging-mux +{% endif %} diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging/templates/mux.j2 new file mode 100644 index 000000000..41e6abd52 --- /dev/null +++ b/roles/openshift_logging/templates/mux.j2 @@ -0,0 +1,121 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: +  name: "{{deploy_name}}" +  labels: +    provider: openshift +    component: "{{component}}" +    logging-infra: "{{logging_component}}" +spec: +  replicas: {{replicas|default(0)}} +  selector: +    provider: openshift +    component: "{{component}}" +    logging-infra: "{{logging_component}}" +  strategy: +    rollingParams: +      intervalSeconds: 1 +      timeoutSeconds: 600 +      updatePeriodSeconds: 1 +    type: Rolling +  template: +    metadata: +      name: "{{deploy_name}}" +      labels: +      
  logging-infra: "{{logging_component}}" +        provider: openshift +        component: "{{component}}" +    spec: +      serviceAccountName: aggregated-logging-fluentd +{% if mux_node_selector is iterable and mux_node_selector | length > 0 %} +      nodeSelector: +{% for key, value in mux_node_selector.iteritems() %} +        {{key}}: "{{value}}" +{% endfor %} +{% endif %} +      containers: +      - name: "mux" +        image: {{image}} +        imagePullPolicy: Always +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %} +        resources: +          limits: +{% if mux_cpu_limit is not none %} +            cpu: "{{mux_cpu_limit}}" +{% endif %} +{% if mux_memory_limit is not none %} +            memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} +        ports: +        - containerPort: "{{ openshift_logging_mux_port }}" +          name: mux-forward +        volumeMounts: +        - name: config +          mountPath: /etc/fluent/configs.d/user +          readOnly: true +        - name: certs +          mountPath: /etc/fluent/keys +          readOnly: true +        - name: dockerhostname +          mountPath: /etc/docker-hostname +          readOnly: true +        - name: localtime +          mountPath: /etc/localtime +          readOnly: true +        - name: muxcerts +          mountPath: /etc/fluent/muxkeys +          readOnly: true +        env: +        - name: "K8S_HOST_URL" +          value: "{{openshift_logging_master_url}}" +        - name: "ES_HOST" +          value: "{{openshift_logging_es_host}}" +        - name: "ES_PORT" +          value: "{{openshift_logging_es_port}}" +        - name: "ES_CLIENT_CERT" +          value: "{{openshift_logging_es_client_cert}}" +        - name: "ES_CLIENT_KEY" +          value: "{{openshift_logging_es_client_key}}" +        - name: "ES_CA" +          value: "{{openshift_logging_es_ca}}" +        - name: "OPS_HOST" +          value: "{{ops_host}}" +        - name: "OPS_PORT" +          value: "{{ops_port}}" +        - name: "OPS_CLIENT_CERT" +          value: "{{openshift_logging_es_ops_client_cert}}" +        - name: "OPS_CLIENT_KEY" +          value: "{{openshift_logging_es_ops_client_key}}" +        - name: "OPS_CA" +          value: "{{openshift_logging_es_ops_ca}}" +        - name: "USE_JOURNAL" +          value: "false" +        - name: "JOURNAL_SOURCE" +          value: "{{openshift_logging_fluentd_journal_source | default('')}}" +        - name: "JOURNAL_READ_FROM_HEAD" +          value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" +        - name: FORWARD_LISTEN_HOST +          value: "{{ openshift_logging_mux_hostname }}" +        - name: FORWARD_LISTEN_PORT +          value: "{{ openshift_logging_mux_port }}" +        - name: USE_MUX +          value: "true" +        - name: MUX_ALLOW_EXTERNAL +          value: "{{ openshift_logging_mux_allow_external| default('false') }}" +      volumes: +      - name: config +        configMap: +          name: logging-mux +      - name: certs +        secret: +          secretName: logging-fluentd +      - name: dockerhostname +        hostPath: +          path: /etc/hostname +      - name: localtime +        hostPath: +          path: /etc/localtime +      - name: muxcerts +        secret: +          secretName: logging-mux diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2 index 6c4ec0c76..70644a39c 100644 --- 
a/roles/openshift_logging/templates/service.j2 +++ b/roles/openshift_logging/templates/service.j2 @@ -26,3 +26,9 @@ spec:    {% for key, value in selector.iteritems() %}    {{key}}: {{value}}    {% endfor %} +{% if externalIPs is defined -%} +  externalIPs: +{% for ip in externalIPs %} +  - {{ ip }} +{% endfor %} +{% endif %} diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml index 9679d209a..92e68a0a3 100644 --- a/roles/openshift_logging/vars/openshift-enterprise.yml +++ b/roles/openshift_logging/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@  ---  __openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('3.6.0') }}" diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index f202486a5..cfc4e2722 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -3,24 +3,13 @@      msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1."    when: not openshift.common.version_gte_3_1_or_1_1 | bool -- name: Copy Configuration to temporary conf -  command: > -    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}} -  changed_when: false -  - name: Add Management Infrastructure project -  command: > -    {{ openshift.common.client_binary }} adm new-project -    management-infra -    --description="Management Infrastructure" -    --config={{manage_iq_tmp_conf}} -  register: osmiq_create_mi_project -  failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0" -  changed_when: osmiq_create_mi_project.rc == 0 +  oc_project: +    name: management-infra +    description: Management Infrastructure  - name: Create Admin and Image Inspector Service Account    oc_serviceaccount: -    kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"      name: "{{ item }}"      namespace: management-infra      state: present @@ -28,51 +17,42 @@    - management-admin    - inspector-admin -- name: Create Cluster Role -  shell: > -    echo {{ manageiq_cluster_role | to_json | quote }} | -    {{ openshift.common.client_binary }} create -    --config={{manage_iq_tmp_conf}} -    -f - -  register: osmiq_create_cluster_role -  failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0" -  changed_when: osmiq_create_cluster_role.rc == 0 +- name: Create manageiq cluster role +  oc_clusterrole: +    name: management-infra-admin +    rules: +    - apiGroups: +      - "" +      resources: +      - pods/proxy +      verbs: +      - "*"  - name: Create Hawkular Metrics Admin Cluster Role -  shell: > -    echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} | -    {{ openshift.common.client_binary }} -    --config={{manage_iq_tmp_conf}} -    create -f - -  register: oshawkular_create_cluster_role -  failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0" -  changed_when: oshawkular_create_cluster_role.rc == 0 -  # AUDIT:changed_when_note: Checking the return code is insufficient -  # here. 
We really need to verify the if the role even exists before -  # we run this task. +  oc_clusterrole: +    name: hawkular-metrics-admin +    rules: +    - apiGroups: +      - "" +      resources: +      - hawkular-alerts +      - hawkular-metrics +      verbs: +      - "*"  - name: Configure role/user permissions -  command: > -    {{ openshift.common.client_binary }} adm {{item}} -    --config={{manage_iq_tmp_conf}} -  with_items: "{{manage_iq_tasks}}" -  register: osmiq_perm_task -  failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0" -  changed_when: osmiq_perm_task.rc == 0 -  # AUDIT:changed_when_note: Checking the return code is insufficient -  # here. We really need to compare the current role/user permissions -  # with their expected state. I think we may have a module for this? - +  oc_adm_policy_user: +    namespace: management-infra +    resource_name: "{{ item.resource_name }}" +    resource_kind: "{{ item.resource_kind }}" +    user: "{{ item.user }}" +  with_items: "{{ manage_iq_tasks }}"  - name: Configure 3_2 role/user permissions -  command: > -    {{ openshift.common.client_binary }} adm {{item}} -    --config={{manage_iq_tmp_conf}} +  oc_adm_policy_user: +    namespace: management-infra +    resource_name: "{{ item.resource_name }}" +    resource_kind: "{{ item.resource_kind }}" +    user: "{{ item.user }}"    with_items: "{{manage_iq_openshift_3_2_tasks}}" -  register: osmiq_perm_3_2_task -  failed_when: osmiq_perm_3_2_task.rc != 0 -  changed_when: osmiq_perm_3_2_task.rc == 0    when: openshift.common.version_gte_3_2_or_1_2 | bool - -- name: Clean temporary configuration file -  file: path={{manage_iq_tmp_conf}} state=absent diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 9936bb126..15d667628 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -1,41 +1,31 @@  --- -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -manageiq_cluster_role: -  apiVersion: v1 -  kind: ClusterRole -  metadata: -    name: management-infra-admin -  rules: -  - resources: -    - pods/proxy -    verbs: -    - '*' - -manageiq_metrics_admin_clusterrole: -  apiVersion: v1 -  kind: ClusterRole -  metadata: -    name: hawkular-metrics-admin -  rules: -  - apiGroups: -    - "" -    resources: -    - hawkular-metrics -    - hawkular-alerts -    verbs: -    - '*' - -manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig -  manage_iq_tasks: -- policy add-role-to-user -n management-infra admin -z management-admin -- policy add-role-to-user -n management-infra management-infra-admin -z management-admin -- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin -- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin -- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin -- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin -- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin -- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin +- resource_kind: role +  resource_name: admin +  user: management-admin +- resource_kind: role +  resource_name: management-infra-admin +  user: management-admin +- resource_kind: cluster-role +  resource_name: cluster-reader +  user: 
system:serviceaccount:management-infra:management-admin +- resource_kind: scc +  resource_name: privileged +  user: system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role +  resource_name: system:image-puller +  user: system:serviceaccount:management-infra:inspector-admin +- resource_kind: scc +  resource_name: privileged +  user: system:serviceaccount:management-infra:inspector-admin +- resource_kind: cluster-role +  resource_name: self-provisioner +  user: system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role +  resource_name: hawkular-metrics-admin +  user: system:serviceaccount:management-infra:management-admin  manage_iq_openshift_3_2_tasks: -- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role +  resource_name: system:image-auditor +  user: system:serviceaccount:management-infra:management-admin diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml deleted file mode 100644 index ced2df1d0..000000000 --- a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- name: generate heapster key/cert -  command: > -    {{ openshift.common.admin_binary }} ca create-server-cert -    --config={{ mktemp.stdout }}/admin.kubeconfig -    --key='{{ mktemp.stdout }}/heapster.key' -    --cert='{{ mktemp.stdout }}/heapster.cert' -    --hostnames=heapster -    --signer-cert='{{ mktemp.stdout }}/ca.crt' -    --signer-key='{{ mktemp.stdout }}/ca.key' -    --signer-serial='{{ mktemp.stdout }}/ca.serial.txt' - -- when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines" -  block: -  - name: read files for the heapster secret -    slurp: src={{ item }} -    register: heapster_secret -    with_items: -    - "{{ mktemp.stdout }}/heapster.cert" -    - "{{ mktemp.stdout }}/heapster.key" -    - "{{ client_ca }}" -    vars: -      custom_ca: "{{ mktemp.stdout }}/heapster_client_ca.crt" -      default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt" -      client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}" -  - name: generate heapster secret template -    template: -      src: secret.j2 -      dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml" -      force: no -    vars: -      name: heapster-secrets -      labels: -        metrics-infra: heapster -      data: -        heapster.cert: "{{ heapster_secret.results[0].content }}" -        heapster.key: "{{ heapster_secret.results[1].content }}" -        heapster.client-ca: "{{ heapster_secret.results[2].content }}" -        heapster.allowed-users: > -          {{ openshift_metrics_heapster_allowed_users|b64encode }} diff --git a/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml new file mode 100644 index 000000000..e81d90ae7 --- /dev/null +++ b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml @@ -0,0 +1,14 @@ +--- +- name: generate heapster secret template +  template: +    src: secret.j2 +    dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml" +    force: no +  vars: +    name: heapster-secrets +    labels: +      metrics-infra: heapster +    data: +      heapster.allowed-users: > +        {{ openshift_metrics_heapster_allowed_users|b64encode }} +  when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines" diff --git 
a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml index 8d27c4930..d13b96be1 100644 --- a/roles/openshift_metrics/tasks/install_heapster.yaml +++ b/roles/openshift_metrics/tasks/install_heapster.yaml @@ -41,6 +41,8 @@        - {port: 80, targetPort: http-endpoint}      selector:        name: "{{obj_name}}" +    annotations: +      service.alpha.openshift.io/serving-cert-secret-name: heapster-certs      labels:        metrics-infra: "{{obj_name}}"        name: "{{obj_name}}" @@ -64,4 +66,4 @@          namespace: "{{ openshift_metrics_project }}"    changed_when: no -- include: generate_heapster_certificates.yaml +- include: generate_heapster_secrets.yaml diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2 index f01ccfd58..ab998c2fb 100644 --- a/roles/openshift_metrics/templates/heapster.j2 +++ b/roles/openshift_metrics/templates/heapster.j2 @@ -34,9 +34,9 @@ spec:          - "heapster-wrapper.sh"          - "--wrapper.allowed_users_file=/secrets/heapster.allowed-users"          - "--source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250" -        - "--tls_cert=/secrets/heapster.cert" -        - "--tls_key=/secrets/heapster.key" -        - "--tls_client_ca=/secrets/heapster.client-ca" +        - "--tls_cert=/heapster-certs/tls.crt" +        - "--tls_key=/heapster-certs/tls.key" +        - "--tls_client_ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"          - "--allowed_users=%allowed_users%"          - "--metric_resolution={{openshift_metrics_resolution}}"  {% if not openshift_metrics_heapster_standalone %} @@ -80,6 +80,8 @@ spec:          volumeMounts:          - name: heapster-secrets            mountPath: "/secrets" +        - name: heapster-certs +          mountPath: "/heapster-certs"  {% if not openshift_metrics_heapster_standalone %}          - name: hawkular-metrics-certs            mountPath: "/hawkular-metrics-certs" @@ -94,6 +96,9 @@ spec:          - name: heapster-secrets            secret:              secretName: heapster-secrets +        - name: heapster-certs +          secret: +            secretName: heapster-certs  {% if not openshift_metrics_heapster_standalone %}          - name: hawkular-metrics-certs            secret: diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2 index 8df89127b..ce0bc2eec 100644 --- a/roles/openshift_metrics/templates/service.j2 +++ b/roles/openshift_metrics/templates/service.j2 @@ -2,6 +2,12 @@ apiVersion: "v1"  kind: "Service"  metadata:    name: "{{obj_name}}" +{% if annotations is defined%} +  annotations: +{% for key, value in annotations.iteritems() %} +    {{key}}: {{value}} +{% endfor %} +{% endif %}  {% if labels is defined%}    labels:  {% for key, value in labels.iteritems() %} diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index f28c3ce48..b20957550 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@  ---  __openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}" +__openshift_metrics_image_version: "{{ 
openshift_hosted_metrics_deployer_version | default ('3.6.0') }}" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 626248306..98139cac2 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -34,6 +34,38 @@          dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"          env_vars: "{{ openshift_node_env_vars | default(None) }}" +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +- name: Check for swap usage +  command: grep "^[^#].*swap" /etc/fstab +  # grep: match any lines which don't begin with '#' and contain 'swap' +  changed_when: false +  failed_when: false +  register: swap_result + +# Disable Swap Block +- block: + +    - name: Disable swap +      command: swapoff --all + +    - name: Remove swap entries from /etc/fstab +      replace: +        dest: /etc/fstab +        regexp: '(^[^#].*swap.*)' +        replace: '# \1' +        backup: yes + +    - name: Add notice about disabling swap +      lineinfile: +        dest: /etc/fstab +        line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' +        state: present + +  when: +    - swap_result.stdout_lines | length > 0 +    - openshift_disable_swap | default(true) +# End Disable Swap Block +  # We have to add tuned-profiles in the same transaction otherwise we run into depsolving  # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.  - name: Install Node package diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml index cd2f362aa..2a36d8945 100644 --- a/roles/openshift_node_upgrade/meta/main.yml +++ b/roles/openshift_node_upgrade/meta/main.yml @@ -10,4 +10,5 @@ galaxy_info:      versions:      - 7  dependencies: +- role: lib_utils  - role: openshift_common diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml index e91891ca9..416cf605a 100644 --- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml +++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml @@ -6,20 +6,6 @@  # - docker_version  # - skip_docker_restart -# We need docker service up to remove all the images, but these services will keep -# trying to re-start and thus re-pull the images we're trying to delete. 
-- name: Stop containerized services -  service: name={{ item }} state=stopped -  with_items: -    - "{{ openshift.common.service_type }}-master" -    - "{{ openshift.common.service_type }}-master-api" -    - "{{ openshift.common.service_type }}-master-controllers" -    - "{{ openshift.common.service_type }}-node" -    - etcd_container -    - openvswitch -  failed_when: false -  when: openshift.common.is_containerized | bool -  - name: Check Docker image count    shell: "docker images -aq | wc -l"    register: docker_image_count @@ -45,5 +31,4 @@  - name: Upgrade Docker    package: name=docker{{ '-' + docker_version }} state=present -- include: restart.yml -  when: not skip_docker_restart | default(False) | bool +# starting docker happens back in ../main.yml where it calls ../restart.yml diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index 6ae8dbc12..e725f4a5d 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -9,6 +9,28 @@  # - openshift_release  # tasks file for openshift_node_upgrade + +- name: Stop node and openvswitch services +  service: +    name: "{{ item }}" +    state: stopped +  with_items: +  - "{{ openshift.common.service_type }}-node" +  - openvswitch +  failed_when: false + +- name: Stop additional containerized services +  service: +    name: "{{ item }}" +    state: stopped +  with_items: +  - "{{ openshift.common.service_type }}-master" +  - "{{ openshift.common.service_type }}-master-controllers" +  - "{{ openshift.common.service_type }}-master-api" +  - etcd_container +  failed_when: false +  when: openshift.common.is_containerized | bool +  - include: docker/upgrade.yml    vars:      # We will restart Docker ourselves after everything is ready: @@ -16,7 +38,6 @@    when:    - l_docker_upgrade is defined    - l_docker_upgrade | bool -  - not openshift.common.is_containerized | bool  - include: "{{ node_config_hook }}"    when: node_config_hook is defined @@ -67,16 +88,6 @@      state: latest    when: not openshift.common.is_containerized | bool -- name: Restart openvswitch -  systemd: -    name: openvswitch -    state: started -  when: -  - not openshift.common.is_containerized | bool - -# Mandatory Docker restart, ensure all containerized services are running: -- include: docker/restart.yml -  - name: Update oreg value    yedit:      src: "{{ openshift.common.config_base }}/node/node-config.yaml" @@ -84,11 +95,40 @@      value: "{{ oreg_url }}"    when: oreg_url is defined -- name: Restart rpm node service -  service: -    name: "{{ openshift.common.service_type }}-node" -    state: restarted -  when: not openshift.common.is_containerized | bool +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +- name: Check for swap usage +  command: grep "^[^#].*swap" /etc/fstab +  # grep: match any lines which don't begin with '#' and contain 'swap' +  changed_when: false +  failed_when: false +  register: swap_result + +  # Disable Swap Block +- block: + +  - name: Disable swap +    command: swapoff --all + +  - name: Remove swap entries from /etc/fstab +    replace: +      dest: /etc/fstab +      regexp: '(^[^#].*swap.*)' +      replace: '# \1' +      backup: yes + +  - name: Add notice about disabling swap +    lineinfile: +      dest: /etc/fstab +      line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' +      state: present + +  when: +  - swap_result.stdout_lines | length > 0 +  - 
openshift_disable_swap | default(true) +  # End Disable Swap Block + +# Restart all services +- include: restart.yml  - name: Wait for node to be ready    oc_obj: diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml index 176fc3c0b..a9fab74e1 100644 --- a/roles/openshift_node_upgrade/tasks/docker/restart.yml +++ b/roles/openshift_node_upgrade/tasks/restart.yml @@ -12,7 +12,7 @@    openshift_facts:      role: docker -- name: Restart containerized services +- name: Start services    service: name={{ item }} state=started    with_items:      - etcd_container @@ -22,7 +22,6 @@      - "{{ openshift.common.service_type }}-master-controllers"      - "{{ openshift.common.service_type }}-node"    failed_when: false -  when: openshift.common.is_containerized | bool  - name: Wait for master API to come back online    wait_for: diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md new file mode 100644 index 000000000..cf0fb94c9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/README.md @@ -0,0 +1,60 @@ +OpenShift GlusterFS Cluster +=========================== + +OpenShift GlusterFS Cluster Installation + +Requirements +------------ + +* Ansible 2.2 + +Role Variables +-------------- + +From this role: + +| Name                                             | Default value           |                                         | +|--------------------------------------------------|-------------------------|-----------------------------------------| +| openshift_storage_glusterfs_timeout              | 300                     | Seconds to wait for pods to become ready +| openshift_storage_glusterfs_namespace            | 'default'               | Namespace in which to create GlusterFS resources +| openshift_storage_glusterfs_is_native            | True                    | GlusterFS should be containerized +| openshift_storage_glusterfs_nodeselector         | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode +| openshift_storage_glusterfs_image                | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' +| openshift_storage_glusterfs_version              | 'latest'                | Container image version to use for GlusterFS pods +| openshift_storage_glusterfs_wipe                 | False                   | Destroy any existing GlusterFS resources and wipe storage devices. 
**WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** +| openshift_storage_glusterfs_heketi_is_native     | True                    | heketi should be containerized +| openshift_storage_glusterfs_heketi_image         | 'heketi/heketi'         | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' +| openshift_storage_glusterfs_heketi_version       | 'latest'                | Container image version to use for heketi pods +| openshift_storage_glusterfs_heketi_admin_key     | ''                      | String to use as secret key for performing heketi commands as admin +| openshift_storage_glusterfs_heketi_user_key      | ''                      | String to use as secret key for performing heketi commands as user that can only view or modify volumes +| openshift_storage_glusterfs_heketi_topology_load | True                    | Load the GlusterFS topology information into heketi +| openshift_storage_glusterfs_heketi_url           | Undefined               | URL for the heketi REST API, dynamically determined in native mode +| openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` + +Dependencies +------------ + +* os_firewall +* openshift_hosted_facts +* openshift_repos +* lib_openshift + +Example Playbook +---------------- + +``` +- name: Configure GlusterFS hosts +  hosts: oo_first_master +  roles: +  - role: openshift_storage_glusterfs +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Jose A. Rivera (jarrpa@redhat.com) diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml new file mode 100644 index 000000000..ade850747 --- /dev/null +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -0,0 +1,17 @@ +--- +openshift_storage_glusterfs_timeout: 300 +openshift_storage_glusterfs_namespace: 'default' +openshift_storage_glusterfs_is_native: True +openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}" +openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" +openshift_storage_glusterfs_version: 'latest' +openshift_storage_glusterfs_wipe: False +openshift_storage_glusterfs_heketi_is_native: True +openshift_storage_glusterfs_heketi_is_missing: True +openshift_storage_glusterfs_heketi_deploy_is_missing: True +openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}" +openshift_storage_glusterfs_heketi_version: 'latest' +openshift_storage_glusterfs_heketi_admin_key: '' +openshift_storage_glusterfs_heketi_user_key: '' +openshift_storage_glusterfs_heketi_topology_load: True +openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}" diff --git a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml new file mode 100644 index 000000000..c9945be13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml @@ -0,0 +1,115 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: deploy-heketi +  labels: +    glusterfs: heketi-template +    deploy-heketi: support +  annotations: +    
description: Bootstrap Heketi installation +    tags: glusterfs,heketi,installation +labels: +  template: deploy-heketi +objects: +- kind: Service +  apiVersion: v1 +  metadata: +    name: deploy-heketi +    labels: +      glusterfs: deploy-heketi-service +      deploy-heketi: support +    annotations: +      description: Exposes Heketi service +  spec: +    ports: +    - name: deploy-heketi +      port: 8080 +      targetPort: 8080 +    selector: +      name: deploy-heketi +- kind: Route +  apiVersion: v1 +  metadata: +    name: deploy-heketi +    labels: +      glusterfs: deploy-heketi-route +      deploy-heketi: support +  spec: +    to: +      kind: Service +      name: deploy-heketi +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: deploy-heketi +    labels: +      glusterfs: deploy-heketi-dc +      deploy-heketi: support +    annotations: +      description: Defines how to deploy Heketi +  spec: +    replicas: 1 +    selector: +      name: deploy-heketi +    triggers: +    - type: ConfigChange +    strategy: +      type: Recreate +    template: +      metadata: +        name: deploy-heketi +        labels: +          name: deploy-heketi +          glusterfs: deploy-heketi-pod +          deploy-heketi: support +      spec: +        serviceAccountName: heketi-service-account +        containers: +        - name: deploy-heketi +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          env: +          - name: HEKETI_USER_KEY +            value: ${HEKETI_USER_KEY} +          - name: HEKETI_ADMIN_KEY +            value: ${HEKETI_ADMIN_KEY} +          - name: HEKETI_EXECUTOR +            value: kubernetes +          - name: HEKETI_FSTAB +            value: /var/lib/heketi/fstab +          - name: HEKETI_SNAPSHOT_LIMIT +            value: '14' +          - name: HEKETI_KUBE_GLUSTER_DAEMONSET +            value: '1' +          ports: +          - containerPort: 8080 +          volumeMounts: +          - name: db +            mountPath: /var/lib/heketi +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 3 +            httpGet: +              path: /hello +              port: 8080 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 30 +            httpGet: +              path: /hello +              port: 8080 +        volumes: +        - name: db +parameters: +- name: HEKETI_USER_KEY +  displayName: Heketi User Secret +  description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY +  displayName: Heketi Administrator Secret +  description: Set secret for administration of the Heketi service as user _admin_ +- name: IMAGE_NAME +  displayName: GlusterFS container name +  required: True +- name: IMAGE_VERSION +  displayName: GlusterFS container version +  required: True diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml new file mode 100644 index 000000000..3f8d8f507 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: +  name: glusterfs-registry-endpoints +spec: +  ports: +  - port: 1 +status: +  loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml new file mode 100644 index 000000000..c66705752 --- /dev/null +++
b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml @@ -0,0 +1,128 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: glusterfs +  labels: +    glusterfs: template +  annotations: +    description: GlusterFS DaemonSet template +    tags: glusterfs +objects: +- kind: DaemonSet +  apiVersion: extensions/v1beta1 +  metadata: +    name: glusterfs +    labels: +      glusterfs: daemonset +    annotations: +      description: GlusterFS DaemonSet +      tags: glusterfs +  spec: +    selector: +      matchLabels: +        glusterfs-node: pod +    template: +      metadata: +        name: glusterfs +        labels: +          glusterfs-node: pod +      spec: +        nodeSelector: +          storagenode: glusterfs +        hostNetwork: true +        containers: +        - name: glusterfs +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          imagePullPolicy: IfNotPresent +          volumeMounts: +          - name: glusterfs-heketi +            mountPath: "/var/lib/heketi" +          - name: glusterfs-run +            mountPath: "/run" +          - name: glusterfs-lvm +            mountPath: "/run/lvm" +          - name: glusterfs-etc +            mountPath: "/etc/glusterfs" +          - name: glusterfs-logs +            mountPath: "/var/log/glusterfs" +          - name: glusterfs-config +            mountPath: "/var/lib/glusterd" +          - name: glusterfs-dev +            mountPath: "/dev" +          - name: glusterfs-misc +            mountPath: "/var/lib/misc/glusterfsd" +          - name: glusterfs-cgroup +            mountPath: "/sys/fs/cgroup" +            readOnly: true +          - name: glusterfs-ssl +            mountPath: "/etc/ssl" +            readOnly: true +          securityContext: +            capabilities: {} +            privileged: true +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 100 +            exec: +              command: +              - "/bin/bash" +              - "-c" +              - systemctl status glusterd.service +            periodSeconds: 10 +            successThreshold: 1 +            failureThreshold: 3 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 100 +            exec: +              command: +              - "/bin/bash" +              - "-c" +              - systemctl status glusterd.service +            periodSeconds: 10 +            successThreshold: 1 +            failureThreshold: 3 +          resources: {} +          terminationMessagePath: "/dev/termination-log" +        volumes: +        - name: glusterfs-heketi +          hostPath: +            path: "/var/lib/heketi" +        - name: glusterfs-run +          emptyDir: {} +        - name: glusterfs-lvm +          hostPath: +            path: "/run/lvm" +        - name: glusterfs-etc +          hostPath: +            path: "/etc/glusterfs" +        - name: glusterfs-logs +          hostPath: +            path: "/var/log/glusterfs" +        - name: glusterfs-config +          hostPath: +            path: "/var/lib/glusterd" +        - name: glusterfs-dev +          hostPath: +            path: "/dev" +        - name: glusterfs-misc +          hostPath: +            path: "/var/lib/misc/glusterfsd" +        - name: glusterfs-cgroup +          hostPath: +            path: "/sys/fs/cgroup" +        - name: glusterfs-ssl +          hostPath: +            path: "/etc/ssl" +        restartPolicy: Always +        terminationGracePeriodSeconds: 30 +        dnsPolicy: ClusterFirst +    
    securityContext: {} +parameters: +- name: IMAGE_NAME +  displayName: GlusterFS container name +  required: True +- name: IMAGE_VERSION +  displayName: GlusterFS container version +  required: True diff --git a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml new file mode 100644 index 000000000..df045c170 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml @@ -0,0 +1,113 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: heketi +  labels: +    glusterfs: heketi-template +  annotations: +    description: Heketi service deployment template +    tags: glusterfs,heketi +labels: +  template: heketi +objects: +- kind: Service +  apiVersion: v1 +  metadata: +    name: heketi +    labels: +      glusterfs: heketi-service +    annotations: +      description: Exposes Heketi service +  spec: +    ports: +    - name: heketi +      port: 8080 +      targetPort: 8080 +    selector: +      glusterfs: heketi-pod +- kind: Route +  apiVersion: v1 +  metadata: +    name: heketi +    labels: +      glusterfs: heketi-route +  spec: +    to: +      kind: Service +      name: heketi +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: heketi +    labels: +      glusterfs: heketi-dc +    annotations: +      description: Defines how to deploy Heketi +  spec: +    replicas: 1 +    selector: +      glusterfs: heketi-pod +    triggers: +    - type: ConfigChange +    strategy: +      type: Recreate +    template: +      metadata: +        name: heketi +        labels: +          glusterfs: heketi-pod +      spec: +        serviceAccountName: heketi-service-account +        containers: +        - name: heketi +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          imagePullPolicy: IfNotPresent +          env: +          - name: HEKETI_USER_KEY +            value: ${HEKETI_USER_KEY} +          - name: HEKETI_ADMIN_KEY +            value: ${HEKETI_ADMIN_KEY} +          - name: HEKETI_EXECUTOR +            value: kubernetes +          - name: HEKETI_FSTAB +            value: /var/lib/heketi/fstab +          - name: HEKETI_SNAPSHOT_LIMIT +            value: '14' +          - name: HEKETI_KUBE_GLUSTER_DAEMONSET +            value: '1' +          ports: +          - containerPort: 8080 +          volumeMounts: +          - name: db +            mountPath: /var/lib/heketi +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 3 +            httpGet: +              path: /hello +              port: 8080 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 30 +            httpGet: +              path: /hello +              port: 8080 +        volumes: +        - name: db +          glusterfs: +            endpoints: heketi-storage-endpoints +            path: heketidbstorage +parameters: +- name: HEKETI_USER_KEY +  displayName: Heketi User Secret +  description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY +  displayName: Heketi Administrator Secret +  description: Set secret for administration of the Heketi service as user _admin_ +- name: IMAGE_NAME +  displayName: GlusterFS container name +  required: True +- name: IMAGE_VERSION +  displayName: GlusterFS container version +  required: True diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py new file
mode 100644 index 000000000..88801e487 --- /dev/null +++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py @@ -0,0 +1,23 @@ +''' + Openshift Storage GlusterFS class that provides useful filters used in GlusterFS +''' + + +def map_from_pairs(source, delim="="): +    ''' Returns a dict built from a comma-separated string of delim-delimited key/value pairs ''' +    if source == '': +        return dict() + +    return dict(item.split(delim) for item in source.split(",")) + + +# pylint: disable=too-few-public-methods +class FilterModule(object): +    ''' OpenShift Storage GlusterFS Filters ''' + +    # pylint: disable=no-self-use, too-few-public-methods +    def filters(self): +        ''' Returns the names of the filters provided by this class ''' +        return { +            'map_from_pairs': map_from_pairs +        } diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml new file mode 100644 index 000000000..aab9851f9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: +  author: Jose A. Rivera +  description: OpenShift GlusterFS Cluster +  company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +dependencies: +- role: openshift_hosted_facts +- role: openshift_repos +- role: lib_openshift diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml new file mode 100644 index 000000000..2b35e5137 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -0,0 +1,107 @@ +--- +- assert: +    that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1" +    msg: Only one GlusterFS nodeselector key pair should be provided + +- assert: +    that: "groups.oo_glusterfs_to_config | count >= 3" +    msg: There must be at least three GlusterFS nodes specified + +- name: Delete pre-existing GlusterFS resources +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: "template,daemonset" +    name: glusterfs +    state: absent +  when: openshift_storage_glusterfs_wipe + +- name: Unlabel any existing GlusterFS nodes +  oc_label: +    name: "{{ item }}" +    kind: node +    state: absent +    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" +  with_items: "{{ groups.all }}" +  when: openshift_storage_glusterfs_wipe + +- name: Delete pre-existing GlusterFS config +  file: +    path: /var/lib/glusterd +    state: absent +  delegate_to: "{{ item }}" +  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" +  when: openshift_storage_glusterfs_wipe + +- name: Get GlusterFS storage devices state +  command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}" +  register: devices_info +  delegate_to: "{{ item }}" +  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" +  failed_when: False +  when: openshift_storage_glusterfs_wipe + +  # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents +  shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}" +  delegate_to: "{{ item.item }}" +  with_items: "{{ devices_info.results }}" +  when: +  - openshift_storage_glusterfs_wipe +  - item.stdout_lines | count > 0 + +- name: Add service accounts to privileged SCC +  oc_adm_policy_user: +    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}" +    resource_kind: scc +    resource_name: privileged +    state: present +  with_items: +  - 'default' +  - 'router' + +- name: Label GlusterFS nodes +  oc_label: +    name: "{{ glusterfs_host }}" +    kind: node +    state: add +    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" +  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" +  loop_control: +    loop_var: glusterfs_host + +- name: Copy GlusterFS DaemonSet template +  copy: +    src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml" +    dest: "{{ mktemp.stdout }}/glusterfs-template.yml" + +- name: Create GlusterFS template +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: template +    name: glusterfs +    state: present +    files: +    - "{{ mktemp.stdout }}/glusterfs-template.yml" + +- name: Deploy GlusterFS pods +  oc_process: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    template_name: "glusterfs" +    create: True +    params: +      IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}" +      IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}" + +- name: Wait for GlusterFS pods +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: pod +    state: list +    selector: "glusterfs-node=pod" +  register: glusterfs_pods +  until: +  - "glusterfs_pods.results.results[0]['items'] | count > 0" +  # There must be as many pods with 'Ready' status True as there are nodes expecting those pods +  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count" +  delay: 10 +  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml new file mode 100644 index 000000000..9f092d5d5 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -0,0 +1,48 @@ +--- +- name: Delete pre-existing GlusterFS registry resources +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: "{{ item.kind }}" +    name: "{{ item.name | default(omit) }}" +    selector: "{{ item.selector | default(omit) }}" +    state: absent +  with_items: +  - kind: "svc,ep" +    name: "glusterfs-registry-endpoints" +  failed_when: False + +- name: Generate GlusterFS registry endpoints +  template: +    src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2" +    dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml" + +- name: Copy GlusterFS registry service +  copy: +    src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml" +    dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml" + +- name: Create GlusterFS registry
endpoints +  oc_obj: +    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" +    state: present +    kind: endpoints +    name: glusterfs-registry-endpoints +    files: +    - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml" + +- name: Create GlusterFS registry service +  oc_obj: +    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" +    state: present +    kind: service +    name: glusterfs-registry-endpoints +    files: +    - "{{ mktemp.stdout }}/glusterfs-registry-service.yml" + +- name: Check if GlusterFS registry volume exists +  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list" +  register: registry_volume + +- name: Create GlusterFS registry volume +  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" +  when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml new file mode 100644 index 000000000..76ae1db75 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -0,0 +1,41 @@ +--- +- name: Copy initial heketi resource files +  copy: +    src: "{{ openshift.common.examples_content_version }}/{{ item }}" +    dest: "{{ mktemp.stdout }}/{{ item }}" +  with_items: +  - "deploy-heketi-template.yml" + +- name: Create deploy-heketi resources +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: template +    name: deploy-heketi +    state: present +    files: +    - "{{ mktemp.stdout }}/deploy-heketi-template.yml" + +- name: Deploy deploy-heketi pod +  oc_process: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    template_name: "deploy-heketi" +    create: True +    params: +      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" +      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" +      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" +      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + +- name: Wait for deploy-heketi pod +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: pod +    state: list +    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" +  register: heketi_pod +  until: +  - "heketi_pod.results.results[0]['items'] | count > 0" +  # Pod's 'Ready' status must be True +  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" +  delay: 10 +  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml new file mode 100644 index 000000000..84b85e95d --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -0,0 +1,109 @@ +--- +- name: Create heketi DB volume +  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ 
openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" +  register: setup_storage +  failed_when: False + +# This is used in the subsequent task +- name: Copy the admin client config +  command: > +    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig +  changed_when: False +  check_mode: no + +# Need `command` here because heketi-storage.json contains multiple objects. +- name: Copy heketi DB to GlusterFS volume +  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}" +  when: "setup_storage.rc == 0" + +- name: Wait for copy job to finish +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: job +    state: list +    name: "heketi-storage-copy-job" +  register: heketi_job +  until: +  - "'results' in heketi_job.results and heketi_job.results.results | count > 0" +  # Pod's 'Complete' status must be True +  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1" +  delay: 10 +  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" +  failed_when: +  - "'results' in heketi_job.results" +  - "heketi_job.results.results | count > 0" +  # Fail when pod's 'Failed' status is True +  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1" +  when: "setup_storage.rc == 0" + +- name: Delete deploy resources +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: "{{ item.kind }}" +    name: "{{ item.name | default(omit) }}" +    selector: "{{ item.selector | default(omit) }}" +    state: absent +  with_items: +  - kind: "template,route,service,jobs,dc,secret" +    selector: "deploy-heketi" +  failed_when: False + +- name: Copy heketi template +  copy: +    src: "{{ openshift.common.examples_content_version }}/heketi-template.yml" +    dest: "{{ mktemp.stdout }}/heketi-template.yml" + +- name: Create heketi resources +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: template +    name: heketi +    state: present +    files: +    - "{{ mktemp.stdout }}/heketi-template.yml" + +- name: Deploy heketi pod +  oc_process: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    template_name: "heketi" +    create: True +    params: +      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" +      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" +      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" +      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + +- name: Wait for heketi pod +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: pod +    state: list +    selector: "glusterfs=heketi-pod" +  register: heketi_pod +  until: +  - "heketi_pod.results.results[0]['items'] | count > 0" +  # Pod's 'Ready' status must be True +  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" +  delay: 10 +  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + 
+- name: Determine heketi URL +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    state: list +    kind: ep +    selector: "glusterfs=heketi-service" +  register: heketi_url +  until: +  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" +  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" +  delay: 10 +  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + +- name: Set heketi URL +  set_fact: +    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + +- name: Verify heketi service +  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" +  changed_when: False diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml new file mode 100644 index 000000000..265a3cc6e --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/main.yml @@ -0,0 +1,182 @@ +--- +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX +  register: mktemp +  changed_when: False +  check_mode: no + +- name: Verify target namespace exists +  oc_project: +    state: present +    name: "{{ openshift_storage_glusterfs_namespace }}" +  when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native + +- include: glusterfs_deploy.yml +  when: openshift_storage_glusterfs_is_native + +- name: Make sure heketi-client is installed +  package: name=heketi-client state=present + +- name: Delete pre-existing heketi resources +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: "{{ item.kind }}" +    name: "{{ item.name | default(omit) }}" +    selector: "{{ item.selector | default(omit) }}" +    state: absent +  with_items: +  - kind: "template,route,service,jobs,dc,secret" +    selector: "deploy-heketi" +  - kind: "template,route,dc,service" +    name: "heketi" +  - kind: "svc,ep" +    name: "heketi-storage-endpoints" +  - kind: "sa" +    name: "heketi-service-account" +  failed_when: False +  when: openshift_storage_glusterfs_heketi_wipe + +- name: Wait for deploy-heketi pods to terminate +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: pod +    state: list +    selector: "glusterfs=deploy-heketi-pod" +  register: heketi_pod +  until: "heketi_pod.results.results[0]['items'] | count == 0" +  delay: 10 +  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" +  when: openshift_storage_glusterfs_heketi_wipe + +- name: Wait for heketi pods to terminate +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    kind: pod +    state: list +    selector: "glusterfs=heketi-pod" +  register: heketi_pod +  until: "heketi_pod.results.results[0]['items'] | count == 0" +  delay: 10 +  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" +  when: openshift_storage_glusterfs_heketi_wipe + +- name: Create heketi service account +  oc_serviceaccount: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    name: heketi-service-account +    state: present +  when: openshift_storage_glusterfs_heketi_is_native + +- name: Add heketi service account to privileged SCC +  oc_adm_policy_user: +    user: "system:serviceaccount:{{ 
openshift_storage_glusterfs_namespace }}:heketi-service-account" +    resource_kind: scc +    resource_name: privileged +    state: present +  when: openshift_storage_glusterfs_heketi_is_native + +- name: Allow heketi service account to view/edit pods +  oc_adm_policy_user: +    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" +    resource_kind: role +    resource_name: edit +    state: present +  when: openshift_storage_glusterfs_heketi_is_native + +- name: Check for existing deploy-heketi pod +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    state: list +    kind: pod +    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" +  register: heketi_pod +  when: openshift_storage_glusterfs_heketi_is_native + +- name: Check if need to deploy deploy-heketi +  set_fact: +    openshift_storage_glusterfs_heketi_deploy_is_missing: False +  when: +  - "openshift_storage_glusterfs_heketi_is_native" +  - "heketi_pod.results.results[0]['items'] | count > 0" +  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True +  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- name: Check for existing heketi pod +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    state: list +    kind: pod +    selector: "glusterfs=heketi-pod" +  register: heketi_pod +  when: openshift_storage_glusterfs_heketi_is_native + +- name: Check if need to deploy heketi +  set_fact: +    openshift_storage_glusterfs_heketi_is_missing: False +  when: +  - "openshift_storage_glusterfs_heketi_is_native" +  - "heketi_pod.results.results[0]['items'] | count > 0" +  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True +  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- include: heketi_deploy_part1.yml +  when: +  - openshift_storage_glusterfs_heketi_is_native +  - openshift_storage_glusterfs_heketi_deploy_is_missing +  - openshift_storage_glusterfs_heketi_is_missing + +- name: Determine heketi URL +  oc_obj: +    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    state: list +    kind: ep +    selector: "glusterfs in (deploy-heketi-service, heketi-service)" +  register: heketi_url +  until: +  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" +  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" +  delay: 10 +  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" +  when: +  - openshift_storage_glusterfs_heketi_is_native +  - openshift_storage_glusterfs_heketi_url is undefined + +- name: Set heketi URL +  set_fact: +    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" +  when: +  - openshift_storage_glusterfs_heketi_is_native +  - openshift_storage_glusterfs_heketi_url is undefined + +- name: Verify heketi service +  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" +  changed_when: False + +- name: Generate 
topology file +  template: +    src: "{{ openshift.common.examples_content_version }}/topology.json.j2" +    dest: "{{ mktemp.stdout }}/topology.json" +  when: +  - openshift_storage_glusterfs_is_native +  - openshift_storage_glusterfs_heketi_topology_load + +- name: Load heketi topology +  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" +  register: topology_load +  failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" +  when: +  - openshift_storage_glusterfs_is_native +  - openshift_storage_glusterfs_heketi_topology_load + +- include: heketi_deploy_part2.yml +  when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing + +- include: glusterfs_registry.yml +  when: "openshift.hosted.registry.storage.kind == 'glusterfs'" + +- name: Delete temp directory +  file: +    name: "{{ mktemp.stdout }}" +    state: absent +  changed_when: False +  check_mode: no diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..d72d085c9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Endpoints +metadata: +  name: glusterfs-registry-endpoints +subsets: +- addresses: +{% for node in groups.oo_glusterfs_to_config %} +  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} +  ports: +  - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 new file mode 100644 index 000000000..eb5b4544f --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 @@ -0,0 +1,39 @@ +{ +  "clusters": [ +{%- set clusters = {} -%} +{%- for node in groups.oo_glusterfs_to_config -%} +  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%} +  {%- if cluster in clusters -%} +    {%- set _dummy = clusters[cluster].append(node) -%} +  {%- else -%} +    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%} +  {%- endif -%} +{%- endfor -%} +{%- for cluster in clusters -%} +    { +      "nodes": [ +{%- for node in clusters[cluster] -%} +        { +          "node": { +            "hostnames": { +              "manage": [ +                "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}" +              ], +              "storage": [ +                "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}" +              ] +            }, +            "zone": {{ hostvars[node].glusterfs_zone | default(1) }} +          }, +          "devices": [ +{%- for device in hostvars[node].glusterfs_devices -%} +            "{{ device }}"{% if not loop.last %},{% endif %} +{%- endfor -%} +          ] +        }{% if not loop.last %},{% endif %} +{%- endfor -%} +      ] +    }{% if not loop.last %},{% endif %} +{%- endfor -%} +  ] +} diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index c3d001bb4..fa9b20e92 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -7,8 +7,13 @@  # Block attempts to install
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c3d001bb4..fa9b20e92 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -7,8 +7,13 @@
 # Block attempts to install origin without specifying some kind of version information.
 # This is because the latest tags for origin are usually alpha builds, which should not
 # be used by default. Users must indicate what they want.
-- fail:
-    msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+  fail:
+    msg: |-
+      To install a containerized Origin release, you must set openshift_release or
+      openshift_image_tag in your inventory to specify which version of the OpenShift
+      component images to use. You may want the latest (usually alpha) releases or
+      a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
   when:
   - is_containerized | bool
   - openshift.common.deployment_type == 'origin'
@@ -27,7 +32,10 @@
   when: openshift_release is defined
 
 # Verify that the image tag is in a valid format
-- block:
+- when:
+  - openshift_image_tag is defined
+  - openshift_image_tag != "latest"
+  block:
 
   # Verifies that when the deployment type is origin the version:
   # - starts with a v
@@ -35,12 +43,14 @@
   # It also allows for optional trailing data which:
   # - must start with a dash
   # - may contain numbers, letters, dashes and dots.
-  - name: Verify Origin openshift_image_tag is valid
+  - name: (Origin) Verify openshift_image_tag is valid
+    when: openshift.common.deployment_type == 'origin'
     assert:
       that:
       - "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
-      msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
-    when: openshift.common.deployment_type == 'origin'
+      msg: |-
+        openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+        You specified openshift_image_tag={{ openshift_image_tag }}
 
   # Verifies that when the deployment type is openshift-enterprise the version:
   # - starts with a v
@@ -48,16 +58,14 @@
   # It also allows for optional trailing data which:
   # - must start with a dash
   # - may contain numbers
-  - name: Verify Enterprise openshift_image_tag is valid
+  - name: (Enterprise) Verify openshift_image_tag is valid
+    when: openshift.common.deployment_type == 'openshift-enterprise'
     assert:
       that:
       - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
-      msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
-    when: openshift.common.deployment_type == 'openshift-enterprise'
-
-  when:
-  - openshift_image_tag is defined
-  - openshift_image_tag != "latest"
+      msg: |-
+        openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+        You specified openshift_image_tag={{ openshift_image_tag }}
 
 # Make sure we copy this to a fact if given a var:
 - set_fact:
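The two assert tasks above encode the accepted tag formats as regular expressions (Ansible's match filter, like Python's re.match, anchors at the start of the string). The stand-alone check below shows which tags each pattern accepts; the accepted samples are taken from the task messages, and the rejected ones are invented for illustration.

    import re

    # Equivalents of the patterns used in the assert tasks above.
    ORIGIN_TAG = re.compile(r'^v?\d+\.\d+\.\d+(-[\w\-\.]*)?$')
    ENTERPRISE_TAG = re.compile(r'^v\d+\.\d+[\.\d+]*(-\d+)?$')

    for tag in ('v1.2.3', 'v3.5.1-alpha.1', 'v1.2', '1.5'):
        print('origin    ', tag, '->', bool(ORIGIN_TAG.match(tag)))

    for tag in ('v1.2', 'v3.4.1', 'v3.5.1.3', 'v1.2-1', 'v3.5.1-alpha.1'):
        print('enterprise', tag, '->', bool(ENTERPRISE_TAG.match(tag)))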
@@ -119,30 +127,42 @@
 - fail:
     msg: openshift_version role was unable to set openshift_version
+  name: Abort if openshift_version was not set
   when: openshift_version is not defined
 
 - fail:
     msg: openshift_version role was unable to set openshift_image_tag
+  name: Abort if openshift_image_tag was not set
   when: openshift_image_tag is not defined
 
 - fail:
     msg: openshift_version role was unable to set openshift_pkg_version
+  name: Abort if openshift_pkg_version was not set
   when: openshift_pkg_version is not defined
 
 - fail:
-    msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+    msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+  name: Abort if no OpenShift version is available
   when:
   - not is_containerized | bool
   - openshift_version == '0.0'
 
-# We can't map an openshift_release to full rpm version like we can with containers, make sure
+# We can't map an openshift_release to a full rpm version like we can with containers; make sure
 # the rpm version we looked up matches the release requested and error out if not.
-- fail:
-    msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
+- name: For an RPM install, abort when the release requested does not match the available version
   when:
   - not is_containerized | bool
   - openshift_release is defined
-  - not openshift_version.startswith(openshift_release) | bool
+  assert:
+    that:
+    - openshift_version.startswith(openshift_release) | bool
+    msg: |-
+      You requested openshift_release {{ openshift_release }}, which is not matched by
+      the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+      on host {{ inventory_hostname }}.
+      We will only install the latest RPMs, so please ensure you are getting the release
+      you expect. You may need to adjust your Ansible inventory, modify the repositories
+      available on the host, or run the appropriate OpenShift upgrade playbook.
 
 # The end result of these three variables is quite important so make sure they are displayed and logged:
 - debug: var=openshift_release
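The last check above also changed shape: a bare fail guarded by a negated condition became an assert with a friendlier multi-line message. The underlying test is just a string-prefix comparison between the requested release and the detected RPM version; the values below are invented to illustrate it.

    # Purely illustrative values; the assert above passes only when the detected
    # RPM version starts with the requested openshift_release string.
    detected_version = "3.6.0"

    for requested_release in ("3.6", "3.5"):
        ok = detected_version.startswith(requested_release)
        print(requested_release, "->", "ok" if ok else "mismatch, the playbook aborts")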
@@ -7,6 +7,7 @@ import os
 import fnmatch
 import re
 import sys
+import subprocess
 import yaml
 
 # Always prefer setuptools over distutils
@@ -199,6 +200,52 @@ class OpenShiftAnsibleGenerateValidation(Command):
         print('\nAll generate scripts passed.\n')
 
 
+class OpenShiftAnsibleSyntaxCheck(Command):
+    ''' Command to run Ansible syntax check'''
+    description = "Run Ansible syntax check"
+    user_options = []
+
+    # Colors
+    FAIL = '\033[91m'  # Red
+    ENDC = '\033[0m'  # Reset
+
+    def initialize_options(self):
+        ''' initialize_options '''
+        pass
+
+    def finalize_options(self):
+        ''' finalize_options '''
+        pass
+
+    def run(self):
+        ''' run command '''
+
+        has_errors = False
+
+        for yaml_file in find_files(
+                os.path.join(os.getcwd(), 'playbooks', 'byo'),
+                None, None, r'\.ya?ml$'):
+            with open(yaml_file, 'r') as contents:
+                for line in contents:
+                    # initialize_groups.yml is used to identify entry point playbooks
+                    if re.search(r'initialize_groups\.yml', line):
+                        print('-' * 60)
+                        print('Syntax checking playbook: %s' % yaml_file)
+                        try:
+                            subprocess.check_output(
+                                ['ansible-playbook', '-i localhost,',
+                                 '--syntax-check', yaml_file]
+                            )
+                        except subprocess.CalledProcessError as cpe:
+                            print('{}Execution failed: {}{}'.format(
+                                self.FAIL, cpe, self.ENDC))
+                            has_errors = True
+                        # Break for loop, no need to continue looping lines
+                        break
+        if has_errors:
+            raise SystemExit(1)
+
+
 class UnsupportedCommand(Command):
     ''' Basic Command to override unsupported commands '''
     user_options = []
@@ -242,6 +289,7 @@ setup(
         'lint': OpenShiftAnsiblePylint,
         'yamllint': OpenShiftAnsibleYamlLint,
         'generate_validation': OpenShiftAnsibleGenerateValidation,
+        'ansible_syntax': OpenShiftAnsibleSyntaxCheck,
     },
     packages=[],
 )
diff --git a/test-requirements.txt b/test-requirements.txt
index 805828e1c..585cca0b9 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,11 +1,14 @@
+# Versions are pinned to prevent pypi releases arbitrarily breaking
+# tests with new APIs/semantics. We want to update versions deliberately.
+
 # flake8 must be listed before pylint to avoid dependency conflicts
-flake8
-flake8-mutable
-flake8-print
-pylint
-setuptools-lint
-yamllint
-coverage
-mock
-pytest
-pytest-cov
+flake8==3.3.0
+flake8-mutable==1.1.0
+flake8-print==2.0.2
+pylint==1.6.5
+setuptools-lint==0.5.2
+yamllint==1.6.1
+coverage==4.3.4
+mock==2.0.0
+pytest==3.0.7
+pytest-cov==2.4.0
@@ -11,7 +11,7 @@ skip_install=True
 deps =
     -rrequirements.txt
     -rtest-requirements.txt
-    py35-flake8: flake8-bugbear
+    py35-flake8: flake8-bugbear==17.3.0
 
 commands =
     unit: pip install -e utils
@@ -21,4 +21,4 @@ commands =
     yamllint: python setup.py yamllint
     generate_validation: python setup.py generate_validation
     # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
-    ansible_syntax: ansible-playbook --syntax-check playbooks/byo/config.yml
+    ansible_syntax: python setup.py ansible_syntax
