83 files changed, 1321 insertions, 135 deletions
diff --git a/files/origin-components/apiserver-template.yaml b/files/origin-components/apiserver-template.yaml index 035e4734b..4dd9395d0 100644 --- a/files/origin-components/apiserver-template.yaml +++ b/files/origin-components/apiserver-template.yaml @@ -4,7 +4,7 @@ metadata: name: template-service-broker-apiserver parameters: - name: IMAGE - value: openshift/origin:latest + value: openshift/origin-template-service-broker:latest - name: NAMESPACE value: openshift-template-service-broker - name: LOGLEVEL @@ -40,14 +40,14 @@ objects: image: ${IMAGE} imagePullPolicy: IfNotPresent command: - - "/usr/bin/openshift" + - "/usr/bin/template-service-broker" - "start" - "template-service-broker" - "--secure-port=8443" - "--audit-log-path=-" - "--tls-cert-file=/var/serving-cert/tls.crt" - "--tls-private-key-file=/var/serving-cert/tls.key" - - "--loglevel=${LOGLEVEL}" + - "--v=${LOGLEVEL}" - "--config=/var/apiserver-config/apiserver-config.yaml" ports: - containerPort: 8443 diff --git a/files/origin-components/console-config.yaml b/files/origin-components/console-config.yaml index 8f3f87c0b..e104e8028 100644 --- a/files/origin-components/console-config.yaml +++ b/files/origin-components/console-config.yaml @@ -1,5 +1,5 @@ -kind: AssetConfig -apiVersion: v1 +kind: WebConsoleConfiguration +apiVersion: webconsole.config.openshift.io/v1 extensionDevelopment: false extensionProperties: null extensionScripts: null @@ -18,4 +18,4 @@ servingInfo: keyFile: /var/serving-cert/tls.key maxRequestsInFlight: 0 namedCertificates: null - requestTimeoutSeconds: 0
\ No newline at end of file
+  requestTimeoutSeconds: 0
diff --git a/files/origin-components/console-template.yaml b/files/origin-components/console-template.yaml
index b2a6569fd..7bf2d0cf4 100644
--- a/files/origin-components/console-template.yaml
+++ b/files/origin-components/console-template.yaml
@@ -14,6 +14,7 @@ parameters:
 - name: IMAGE
   value: openshift/origin-web-console:latest
 - name: NAMESPACE
+  # This namespace cannot be changed. Only `openshift-web-console` is supported.
   value: openshift-web-console
 - name: LOGLEVEL
   value: "0"
@@ -51,6 +52,7 @@ objects:
         command:
         - "/usr/bin/origin-web-console"
         - "--audit-log-path=-"
+        - "-v=${LOGLEVEL}"
         - "--config=/var/webconsole-config/webconsole-config.yaml"
         ports:
         - containerPort: 8443
@@ -64,15 +66,20 @@ objects:
             path: /healthz
             port: 8443
             scheme: HTTPS
+        livenessProbe:
+          httpGet:
+            path: /
+            port: 8443
+            scheme: HTTPS
       nodeSelector: "${{NODE_SELECTOR}}"
       volumes:
       - name: serving-cert
         secret:
-          defaultMode: 420
+          defaultMode: 400
           secretName: webconsole-serving-cert
       - name: webconsole-config
         configMap:
-          defaultMode: 420
+          defaultMode: 440
           name: webconsole-config
 # to create the config for the web console
diff --git a/inventory/hosts.example b/inventory/hosts.example
index d786146fc..da60b63e6 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -286,6 +286,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_cloudprovider_openstack_region=region
 #openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
 #
+# Note: If you're getting a "BS API version autodetection failed" error when provisioning Cinder volumes, you may need this setting
+#openshift_cloudprovider_openstack_blockstorage_version=v2
+#
 # GCE
 #openshift_cloudprovider_kind=gce
 #
@@ -909,6 +912,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
 #openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
 #openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}]
 
 # Or you may optionally define your own build overrides configuration serialized as json
 #openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
@@ -1005,6 +1009,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # whereas this would not
 # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
 #
+# A timeout to wait for nodes to drain pods can be specified to ensure that the
+# upgrade continues even if nodes fail to drain pods in the allowed time. The
+# default value of 0 will wait indefinitely, allowing the admin to investigate
+# the root cause and ensuring that disruption budgets are respected. If
+# a timeout of 0 is used there will also be one attempt to retry draining the
+# node. If a non-zero timeout is specified there will be no attempt to retry.
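+# For example (an illustrative value, not a recommendation), this would give
+# each node up to ten minutes to drain before the upgrade moves on without a
+# retry:
+#openshift_upgrade_nodes_drain_timeout=600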
+#openshift_upgrade_nodes_drain_timeout=0
+#
 # Multiple data migrations take place, and if they fail, they will fail the upgrade
 # You may wish to disable these or make them non-fatal
 #
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index ef8233b67..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -17,6 +17,8 @@
 - name: Create service signer certificate
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Create remote temp directory for creating certs
     command: mktemp -d /tmp/openshift-ansible-XXXXXXX
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index ffb11670d..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -51,13 +51,19 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result
     until: not (l_docker_upgrade_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_docker_upgrade_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
   - include_tasks: tasks/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 412075d41..e89f06f17 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -22,6 +22,8 @@
 # See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
 - name: Pre master upgrade - Upgrade all storage
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Upgrade all storage
     command: >
@@ -49,10 +51,9 @@
   vars:
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
+  roles:
+  - openshift_facts
   tasks:
-  - import_role:
-      name: openshift_facts
-
   # Run the pre-upgrade hook if defined:
   - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined
@@ -127,6 +128,7 @@
   hosts: oo_masters_to_config
   roles:
   - { role: openshift_cli }
+  - { role: openshift_facts }
   vars:
     __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
   tasks:
@@ -289,12 +291,18 @@
 
     - name: Drain Node for Kubelet upgrade
       command: >
-        {{
hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + --force --delete-local-data --ignore-daemonsets + --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_control_plane_drain_result until: not (l_upgrade_control_plane_drain_result is failed) - retries: 60 - delay: 60 + retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}" + delay: 5 + failed_when: + - l_upgrade_control_plane_drain_result is failed + - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0 roles: - openshift_facts diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 464af3ae6..850442b3b 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -33,12 +33,18 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + --force --delete-local-data --ignore-daemonsets + --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_nodes_drain_result until: not (l_upgrade_nodes_drain_result is failed) - retries: 60 - delay: 60 + retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}" + delay: 5 + failed_when: + - l_upgrade_nodes_drain_result is failed + - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0 post_tasks: - import_role: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml index 6d59bfd0b..e259b5d09 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml @@ -50,11 +50,11 @@ delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_nodes_drain_result until: not (l_upgrade_nodes_drain_result is failed) - retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}" + retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}" delay: 5 failed_when: - l_upgrade_nodes_drain_result is failed - - openshift_upgrade_nodes_drain_timeout | default(0) == '0' + - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0 # Alright, let's clean up! 
- name: clean up the old scale group diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index 49e691352..9c7688981 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -7,6 +7,7 @@ hosts: oo_first_master roles: - { role: lib_openshift } + - { role: openshift_facts } tasks: - name: Check for invalid namespaces and SDN errors diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml index 0aea5069d..552bea5e7 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -41,13 +41,13 @@ roles: - role: openshift_facts tasks: - - name: Stop {{ openshift.common.service_type }}-master-controllers + - name: Stop {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: stopped - - name: Start {{ openshift.common.service_type }}-master-controllers + - name: Start {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: started - import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/init/version.yml b/playbooks/init/version.yml index 8d1d61fde..962ee7220 100644 --- a/playbooks/init/version.yml +++ b/playbooks/init/version.yml @@ -6,7 +6,7 @@ - include_role: name: openshift_version tasks_from: first_master.yml - - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}" + - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version | default('') }}" # NOTE: We set this even on etcd hosts as they may also later run as masters, # and we don't want to install wrong version of docker and have to downgrade @@ -16,7 +16,7 @@ vars: l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master" l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" - l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version }}" + l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}" l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}" tasks: - set_fact: diff --git a/playbooks/openshift-master/private/certificates-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml index 4dbc041b0..56af18ca7 100644 --- a/playbooks/openshift-master/private/certificates-backup.yml +++ b/playbooks/openshift-master/private/certificates-backup.yml @@ -28,6 +28,7 @@ path: "{{ openshift.common.config_base }}/master/{{ item }}" state: absent with_items: + # certificates_to_synchronize is a custom filter in lib_utils - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}" - "etcd.server.crt" - "etcd.server.key" diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index 2c9b70b5f..2caf63592 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -372,6 +372,112 @@ In order to set a custom entrypoint, update 
 `openshift_master_cluster_public_hostname`.
 
 Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
 you cannot set this value to simply `openshift.example.com`.
+
+## Using Cinder-backed Persistent Volumes
+
+You will need to set up OpenStack credentials. You can try putting this in your
+`inventory/group_vars/OSEv3.yml`:
+
+    openshift_cloudprovider_kind: openstack
+    openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+    openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+    openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+    openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_PROJECT_NAME') }}"
+    openshift_cloudprovider_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
+    openshift_cloudprovider_openstack_blockstorage_version: v2
+
+**NOTE**: you must specify the Block Storage version as v2, because OpenShift
+does not support the v3 API yet and the version detection is currently not
+working properly.
+
+For more information, consult the [Configuring for OpenStack page in the OpenShift documentation][openstack-credentials].
+
+[openstack-credentials]: https://docs.openshift.org/latest/install_config/configuring_openstack.html#install-config-configuring-openstack
+
+**NOTE**: the OpenStack integration currently requires DNS to be configured and
+running, and the `openshift_hostname` variable must match the Nova server name
+for each node. The cluster deployment will fail without it. If you use the
+provided OpenStack dynamic inventory and configure the
+`openshift_openstack_dns_nameservers` Ansible variable, this will be handled
+for you.
+
+After a successful deployment, the cluster is configured for Cinder persistent
+volumes.
+
+### Validation
+
+1. Log in and create a new project (with `oc login` and `oc new-project`)
+2. Create a file called `cinder-claim.yaml` with the following contents:
+
+```yaml
+apiVersion: "v1"
+kind: "PersistentVolumeClaim"
+metadata:
+  name: "claim1"
+spec:
+  accessModes:
+    - "ReadWriteOnce"
+  resources:
+    requests:
+      storage: "1Gi"
+```
+3. Run `oc create -f cinder-claim.yaml` to create the Persistent Volume Claim object in OpenShift
+4. Run `oc describe pvc claim1` to verify that the claim was created and its Status is `Bound`
+5. Run `openstack volume list`
+   * A new volume called `kubernetes-dynamic-pvc-UUID` should be created
+   * Its size should be `1`
+   * It should not be attached to any server
+6. Create a file called `mysql-pod.yaml` with the following contents:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: mysql
+  labels:
+    name: mysql
+spec:
+  containers:
+    - resources:
+        limits:
+          cpu: 0.5
+      image: openshift/mysql-55-centos7
+      name: mysql
+      env:
+        - name: MYSQL_ROOT_PASSWORD
+          value: yourpassword
+        - name: MYSQL_USER
+          value: wp_user
+        - name: MYSQL_PASSWORD
+          value: wp_pass
+        - name: MYSQL_DATABASE
+          value: wp_db
+      ports:
+        - containerPort: 3306
+          name: mysql
+      volumeMounts:
+        - name: mysql-persistent-storage
+          mountPath: /var/lib/mysql/data
+  volumes:
+    - name: mysql-persistent-storage
+      persistentVolumeClaim:
+        claimName: claim1
+```
+
+7. Run `oc create -f mysql-pod.yaml` to create the pod
+8. Run `oc describe pod mysql`
+   * Its events should show that the pod has successfully attached the volume above
+   * It should show no errors
+   * `openstack volume list` should show the volume attached to an OpenShift app node
+   * NOTE: this can take several seconds
+9.
After a while, `oc get pod` should show the `mysql` pod as running +10. Run `oc delete pod mysql` to remove the pod + * The Cinder volume should no longer be attached +11. Run `oc delete pvc claim1` to remove the volume claim + * The Cinder volume should be deleted + + + ## Creating and using a Cinder volume for the OpenShift registry You can optionally have the playbooks create a Cinder volume and set diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml index 481807dc9..a8663f946 100644 --- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -20,6 +20,7 @@ openshift_hosted_registry_wait: True #openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" #openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" #openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}" +#openshift_cloudprovider_openstack_blockstorage_version: v2 ## Use Cinder volume for Openshift registry: diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py index 45cc4e15a..76e658eb7 100755 --- a/playbooks/openstack/sample-inventory/inventory.py +++ b/playbooks/openstack/sample-inventory/inventory.py @@ -89,13 +89,15 @@ def build_inventory(): # TODO(shadower): what about multiple networks? if server.private_v4: hostvars['private_v4'] = server.private_v4 + hostvars['openshift_ip'] = server.private_v4 + # NOTE(shadower): Yes, we set both hostname and IP to the private # IP address for each node. OpenStack doesn't resolve nodes by # name at all, so using a hostname here would require an internal # DNS which would complicate the setup and potentially introduce # performance issues. 
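The `openshift_hostname` metadata entry consumed here is written by the `heat_stack_server.yaml.j2` change later in this diff. A minimal sketch of the resulting lookup, with invented values (not part of the patch):

```python
# What the OpenStack SDK would hand back for one server (values invented):
server_metadata = {'openshift_hostname': 'master-0'}  # set by Heat from the Nova server name
private_v4 = '192.168.0.10'

# Same precedence as the inventory change: prefer the metadata entry,
# fall back to the private IP as before.
openshift_hostname = server_metadata.get('openshift_hostname', private_v4)
assert openshift_hostname == 'master-0'
```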
- hostvars['openshift_ip'] = server.private_v4 - hostvars['openshift_hostname'] = server.private_v4 + hostvars['openshift_hostname'] = server.metadata.get( + 'openshift_hostname', server.private_v4) hostvars['openshift_public_hostname'] = server.name if server.metadata['host-type'] == 'cns': diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml index 7831f4c7d..8dd916e79 100644 --- a/roles/container_runtime/tasks/docker_upgrade_check.yml +++ b/roles/container_runtime/tasks/docker_upgrade_check.yml @@ -21,6 +21,7 @@ retries: 4 until: curr_docker_version is succeeded changed_when: false + when: not openshift_is_atomic | bool - name: Get latest available version of Docker command: > @@ -29,7 +30,9 @@ retries: 4 until: avail_docker_version is succeeded # Don't expect docker rpm to be available on hosts that don't already have it installed: - when: pkg_check.rc == 0 + when: + - not openshift_is_atomic | bool + - pkg_check.rc == 0 failed_when: false changed_when: false @@ -37,9 +40,10 @@ msg: This playbook requires access to Docker 1.12 or later # Disable the 1.12 requirement if the user set a specific Docker version when: - - docker_version is not defined - - docker_upgrade is not defined or docker_upgrade | bool == True - - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<'))) + - not openshift_is_atomic | bool + - docker_version is not defined + - docker_upgrade is not defined or docker_upgrade | bool == True + - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<'))) # Default l_docker_upgrade to False, we'll set to True if an upgrade is required: - set_fact: @@ -48,14 +52,17 @@ # Make sure a docker_version is set if none was requested: - set_fact: docker_version: "{{ avail_docker_version.stdout }}" - when: pkg_check.rc == 0 and docker_version is not defined + when: + - not openshift_is_atomic | bool + - pkg_check.rc == 0 and docker_version is not defined - name: Flag for Docker upgrade if necessary set_fact: l_docker_upgrade: True when: - - pkg_check.rc == 0 - - curr_docker_version.stdout is version_compare(docker_version,'<') + - not openshift_is_atomic | bool + - pkg_check.rc == 0 + - curr_docker_version.stdout is version_compare(docker_version,'<') # Additional checks for Atomic hosts: - name: Determine available Docker @@ -70,5 +77,5 @@ - fail: msg: This playbook requires access to Docker 1.12 or later when: - - openshift_is_atomic | bool - - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<') + - openshift_is_atomic | bool + - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<') diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index 78578a055..ce295d2f5 100644 --- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml @@ -57,6 +57,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. 
+# delegated_serial_command is a custom module in lib_utils - name: Sign and create the client crt delegated_serial_command: command: > diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 987380d0c..7c8b87d99 100644 --- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -50,6 +50,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. +# delegated_serial_command is a custom module in lib_utils - name: Sign and create the server crt delegated_serial_command: command: > @@ -83,6 +84,7 @@ # Certificates must be signed serially in order to avoid competing # for the serial file. +# delegated_serial_command is a custom module in lib_utils - name: Sign and create the peer crt delegated_serial_command: command: > diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py index eb13a58ba..eb13a58ba 100644 --- a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py +++ b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/lib_utils/filter_plugins/oo_cert_expiry.py index 58b228fee..58b228fee 100644 --- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py +++ b/roles/lib_utils/filter_plugins/oo_cert_expiry.py diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py index a2ea287cf..fc14b5633 100644 --- a/roles/lib_utils/filter_plugins/oo_filters.py +++ b/roles/lib_utils/filter_plugins/oo_filters.py @@ -589,6 +589,14 @@ that result to this filter plugin. 
return secret_name +def map_from_pairs(source, delim="="): + ''' Returns a dict given the source and delim delimited ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + class FilterModule(object): """ Custom ansible filter mapping """ @@ -618,4 +626,5 @@ class FilterModule(object): "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule, "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list, "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets, + "map_from_pairs": map_from_pairs } diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py index dfcb11da3..dfcb11da3 100644 --- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py +++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py index 003ce5f9e..003ce5f9e 100644 --- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py +++ b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py index ff15f693b..ff15f693b 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/lib_utils/filter_plugins/openshift_master.py diff --git a/roles/etcd/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py index 0cab1ca88..0cab1ca88 100755 --- a/roles/etcd/library/delegated_serial_command.py +++ b/roles/lib_utils/library/delegated_serial_command.py diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py index e355266b0..e355266b0 100644 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ b/roles/lib_utils/library/openshift_cert_expiry.py diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py index 440b8ec28..440b8ec28 100644 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ b/roles/lib_utils/library/openshift_container_binary_sync.py diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py new file mode 100644 index 000000000..4858c5ec6 --- /dev/null +++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py @@ -0,0 +1,143 @@ +# pylint: disable=missing-docstring + +import re +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + # pylint: disable=too-many-branches,too-many-statements,too-many-arguments + + def run(self, terms, variables=None, regions_enabled=True, short_version=None, + deployment_type=None, **kwargs): + + predicates = [] + + if short_version is None or deployment_type is None: + if 'openshift' not in variables: + raise AnsibleError("This lookup module requires openshift_facts to be run prior to use") + + if deployment_type is None: + if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']: + raise AnsibleError("This lookup module requires that the deployment_type be set") + + deployment_type = variables['openshift']['common']['deployment_type'] + + if short_version is 
None: + if 'short_version' in variables['openshift']['common']: + short_version = variables['openshift']['common']['short_version'] + elif 'openshift_release' in variables: + release = variables['openshift_release'] + if release.startswith('v'): + short_version = release[1:] + else: + short_version = release + short_version = '.'.join(short_version.split('.')[0:2]) + elif 'openshift_version' in variables: + version = variables['openshift_version'] + short_version = '.'.join(version.split('.')[0:2]) + else: + # pylint: disable=line-too-long + raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") + if deployment_type == 'origin': + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + elif deployment_type == 'openshift-enterprise': + if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + else: + raise AnsibleError("Unknown deployment_type %s" % deployment_type) + + if deployment_type == 'origin': + # convert short_version to enterprise short_version + short_version = re.sub('^1.', '3.', short_version) + + if short_version == 'latest': + short_version = '3.9' + + # Predicates ordered according to OpenShift Origin source: + # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go + + if short_version == '3.1': + predicates.extend([ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'MatchNodeSelector'}, + ]) + + if short_version == '3.2': + predicates.extend([ + {'name': 'PodFitsHostPorts'}, + {'name': 'PodFitsResources'}, + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MatchNodeSelector'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'} + ]) + + if short_version == '3.3': + predicates.extend([ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'} + ]) + + if short_version == '3.4': + predicates.extend([ + {'name': 'NoDiskConflict'}, + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'MatchInterPodAffinity'} + ]) + + if short_version in ['3.5', '3.6']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + ]) + + if short_version in ['3.7', '3.8', '3.9']: + predicates.extend([ + {'name': 'NoVolumeZoneConflict'}, + {'name': 'MaxEBSVolumeCount'}, + {'name': 'MaxGCEPDVolumeCount'}, + {'name': 'MaxAzureDiskVolumeCount'}, + {'name': 'MatchInterPodAffinity'}, + {'name': 'NoDiskConflict'}, + {'name': 'GeneralPredicates'}, + {'name': 'PodToleratesNodeTaints'}, + {'name': 'CheckNodeMemoryPressure'}, + {'name': 'CheckNodeDiskPressure'}, + {'name': 'NoVolumeNodeConflict'}, + ]) + + if regions_enabled: + region_predicate = { + 
'name': 'Region', + 'argument': { + 'serviceAffinity': { + 'labels': ['region'] + } + } + } + predicates.append(region_predicate) + + return predicates diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py new file mode 100644 index 000000000..18e1b2e0c --- /dev/null +++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py @@ -0,0 +1,117 @@ +# pylint: disable=missing-docstring + +import re +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + # pylint: disable=too-many-branches,too-many-statements,too-many-arguments + + def run(self, terms, variables=None, zones_enabled=True, short_version=None, + deployment_type=None, **kwargs): + + priorities = [] + + if short_version is None or deployment_type is None: + if 'openshift' not in variables: + raise AnsibleError("This lookup module requires openshift_facts to be run prior to use") + + if deployment_type is None: + if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']: + raise AnsibleError("This lookup module requires that the deployment_type be set") + + deployment_type = variables['openshift']['common']['deployment_type'] + + if short_version is None: + if 'short_version' in variables['openshift']['common']: + short_version = variables['openshift']['common']['short_version'] + elif 'openshift_release' in variables: + release = variables['openshift_release'] + if release.startswith('v'): + short_version = release[1:] + else: + short_version = release + short_version = '.'.join(short_version.split('.')[0:2]) + elif 'openshift_version' in variables: + version = variables['openshift_version'] + short_version = '.'.join(version.split('.')[0:2]) + else: + # pylint: disable=line-too-long + raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") + + if deployment_type == 'origin': + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + elif deployment_type == 'openshift-enterprise': + if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']: + raise AnsibleError("Unknown short_version %s" % short_version) + else: + raise AnsibleError("Unknown deployment_type %s" % deployment_type) + + if deployment_type == 'origin': + # convert short_version to origin short_version + short_version = re.sub('^1.', '3.', short_version) + + if short_version == 'latest': + short_version = '3.9' + + if short_version == '3.1': + priorities.extend([ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1} + ]) + + if short_version == '3.2': + priorities.extend([ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodeAffinityPriority', 'weight': 1} + ]) + + if short_version == '3.3': + priorities.extend([ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1} + ]) + + if short_version == '3.4': + 
priorities.extend([ + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1}, + {'name': 'InterPodAffinityPriority', 'weight': 1} + ]) + + if short_version in ['3.5', '3.6', '3.7', '3.8', '3.9']: + priorities.extend([ + {'name': 'SelectorSpreadPriority', 'weight': 1}, + {'name': 'InterPodAffinityPriority', 'weight': 1}, + {'name': 'LeastRequestedPriority', 'weight': 1}, + {'name': 'BalancedResourceAllocation', 'weight': 1}, + {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, + {'name': 'NodeAffinityPriority', 'weight': 1}, + {'name': 'TaintTolerationPriority', 'weight': 1} + ]) + + if zones_enabled: + zone_priority = { + 'name': 'Zone', + 'argument': { + 'serviceAntiAffinity': { + 'label': 'zone' + } + }, + 'weight': 2 + } + priorities.append(zone_priority) + + return priorities diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/lib_utils/test/conftest.py index df948fff0..aabdd4fa1 100644 --- a/roles/openshift_certificate_expiry/test/conftest.py +++ b/roles/lib_utils/test/conftest.py @@ -1,7 +1,15 @@ # pylint: disable=missing-docstring,invalid-name,redefined-outer-name +import os import pytest +import sys + from OpenSSL import crypto +sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins")) + +from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402 +from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402 + # Parameter list for valid_cert fixture VALID_CERTIFICATE_PARAMS = [ { @@ -117,3 +125,48 @@ def valid_cert(request, ca): 'cert_file': cert_file, 'cert': cert } + + +@pytest.fixture() +def predicates_lookup(): + return PredicatesLookupModule() + + +@pytest.fixture() +def priorities_lookup(): + return PrioritiesLookupModule() + + +@pytest.fixture() +def facts(): + return { + 'openshift': { + 'common': {} + } + } + + +@pytest.fixture(params=[True, False]) +def regions_enabled(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def zones_enabled(request): + return request.param + + +def v_prefix(release): + """Prefix a release number with 'v'.""" + return "v" + release + + +def minor(release): + """Add a suffix to release, making 'X.Y' become 'X.Y.Z'.""" + return release + ".1" + + +@pytest.fixture(params=[str, v_prefix, minor]) +def release_mod(request): + """Modifies a release string to alternative valid values.""" + return request.param diff --git a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py index e8da1e04a..e8da1e04a 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py +++ b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/lib_utils/test/openshift_master_facts_conftest.py index 140cced73..140cced73 100644 --- a/roles/openshift_master_facts/test/conftest.py +++ b/roles/lib_utils/test/openshift_master_facts_conftest.py diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py index 11aad9f03..11aad9f03 100644 --- 
a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py +++ b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py index 527fc9ff4..527fc9ff4 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py +++ b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/lib_utils/test/test_fakeopensslclasses.py index 8a521a765..8a521a765 100644 --- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py +++ b/roles/lib_utils/test/test_fakeopensslclasses.py diff --git a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py b/roles/lib_utils/test/test_load_and_handle_cert.py index 98792e2ee..98792e2ee 100644 --- a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py +++ b/roles/lib_utils/test/test_load_and_handle_cert.py diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 71de24339..8c8227b5e 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -109,6 +109,7 @@ openshift_aws_node_group_config_node_volumes: device_type: gp2 delete_on_termination: True +# build_instance_tags is a custom filter in role lib_utils openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_node_group_termination_policy: Default openshift_aws_node_group_replace_instances: [] @@ -201,6 +202,7 @@ openshift_aws_node_group_config: openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}" openshift_aws_elb_az_load_balancing: False +# build_instance_tags is a custom filter in role lib_utils openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}" diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 9485cc3ac..a9f9cc3c4 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -43,6 +43,7 @@ - name: set the value for the deployment_serial and the current asgs set_fact: + # scale_groups_serial is a custom filter in role lib_utils l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}" openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}" diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml index 1f4ef3e1c..3ad876e37 100644 --- a/roles/openshift_aws/tasks/wait_for_groups.yml +++ b/roles/openshift_aws/tasks/wait_for_groups.yml @@ -8,6 +8,7 @@ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}" register: qasg + # scale_groups_match_capacity is a custom filter in role lib_utils until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool delay: 10 retries: 60 diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml index cf49a6ebf..df53280c8 100644 --- a/roles/openshift_buildoverrides/vars/main.yml 
+++ b/roles/openshift_buildoverrides/vars/main.yml @@ -9,3 +9,4 @@ buildoverrides_yaml: imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}" nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}" annotations: "{{ openshift_buildoverrides_annotations | default(None) }}" + tolerations: "{{ openshift_buildoverrides_tolerations | default(None) }}" diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml index 8dea2c07f..7062b5060 100644 --- a/roles/openshift_certificate_expiry/tasks/main.yml +++ b/roles/openshift_certificate_expiry/tasks/main.yml @@ -16,7 +16,9 @@ - name: Generate the result JSON string run_once: yes - set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" + set_fact: + # oo_cert_expiry_results_to_json is a custom filter in role lib_utils + json_result_string: "{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" when: openshift_certificate_expiry_save_json_results|bool - name: Generate results JSON file diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 37bed9dbe..ae8d1ace0 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -12,6 +12,7 @@ register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" + # openshift_container_binary_sync is a custom module in lib_utils - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: image: "{{ openshift_cli_image }}" @@ -28,6 +29,7 @@ register: pull_result changed_when: "'Pulling layer' in pull_result.stdout" + # openshift_container_binary_sync is a custom module in lib_utils - name: Copy client binaries/symlinks out of CLI image for use on the host openshift_container_binary_sync: image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}" diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2 index 313ee02b4..30f18ffa9 100644 --- a/roles/openshift_cloud_provider/templates/openstack.conf.j2 +++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2 @@ -19,3 +19,7 @@ region = {{ openshift_cloudprovider_openstack_region }} [LoadBalancer] subnet-id = {{ openshift_cloudprovider_openstack_lb_subnet_id }} {% endif %} +{% if openshift_cloudprovider_openstack_blockstorage_version is defined %} +[BlockStorage] +bs-version={{ openshift_cloudprovider_openstack_blockstorage_version }} +{% endif %}
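For reference, when `openshift_cloudprovider_openstack_blockstorage_version` is set to `v2` (as in the inventory examples above), the rendered `openstack.conf` should end with a section like:

    [BlockStorage]
    bs-version=v2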
\ No newline at end of file diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py index dcaf87eca..c83adb26d 100644 --- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py +++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py @@ -175,6 +175,8 @@ def format_failure(failure): play = failure['play'] task = failure['task'] msg = failure['msg'] + if not isinstance(msg, string_types): + msg = str(msg) checks = failure['checks'] fields = ( (u'Hosts', host), diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index 87e6146d4..6e30a8610 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -21,7 +21,7 @@ class DiskAvailability(OpenShiftCheck): 'oo_etcd_to_config': 20 * 10**9, }, # Used to copy client binaries into, - # see roles/openshift_cli/library/openshift_container_binary_sync.py. + # see roles/lib_utils/library/openshift_container_binary_sync.py. '/usr/local/bin': { 'oo_masters_to_config': 1 * 10**9, 'oo_nodes_to_config': 1 * 10**9, diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 2dc9c98f6..c2be00d19 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -18,6 +18,7 @@ - name: set_fact replicas set_fact: + # get_router_replicas is a custom filter in role lib_utils replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}" - name: Get the certificate contents for router diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index ba412b5a6..247c7e4df 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -79,14 +79,6 @@ def entry_from_named_pair(register_pairs, key): raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key)) -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - def serviceaccount_name(qualified_sa): ''' Returns the simple name from a fully qualified name ''' return qualified_sa.split(":")[-1] @@ -134,7 +126,6 @@ class FilterModule(object): return { 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, - 'map_from_pairs': map_from_pairs, 'min_cpu': min_cpu, 'es_storage': es_storage, 'serviceaccount_name': serviceaccount_name, diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 67904a9d3..ebd2d747b 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -94,7 +94,7 @@ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}" with_together: - - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() | list }}" - "{{ openshift_logging_facts.elasticsearch.pvcs }}" - "{{ es_indices }}" 
loop_control: @@ -169,7 +169,7 @@ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}" with_together: - - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() | list }}" - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}" - "{{ es_ops_indices }}" loop_control: diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml index 5bee58725..df5299a83 100644 --- a/roles/openshift_logging_curator/vars/main.yml +++ b/roles/openshift_logging_curator/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_curator_version: "3_8" -__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_curator_version: "3_9" +__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index ef259cd3a..122231031 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,6 +1,6 @@ --- -__latest_es_version: "3_8" -__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_es_version: "3_9" +__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 9b58e4456..87b4204b5 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -5,6 +5,7 @@ openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshi openshift_logging_fluentd_namespace: logging ### Common settings +# map_from_pairs is a custom filter plugin in role lib_utils openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" openshift_logging_fluentd_cpu_limit: null openshift_logging_fluentd_cpu_request: 100m diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 529859983..79ebbca08 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -172,8 +172,8 @@ app_port: "{{ openshift_logging_fluentd_app_port }}" ops_host: "{{ openshift_logging_fluentd_ops_host }}" ops_port: "{{ openshift_logging_fluentd_ops_port }}" - fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}" - fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" + fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}" + fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values() | first }}" fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}" fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}" fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}" diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index 762e3d4d0..b60da814f 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ 
b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@ --- -__latest_fluentd_version: "3_8" -__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_fluentd_version: "3_9" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] __allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index a2c54d8e4..fed926a3b 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_kibana_version: "3_8" -__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_kibana_version: "3_9" +__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index dbf4549c4..e87c8d33e 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -6,6 +6,7 @@ openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_pub openshift_logging_mux_namespace: logging ### Common settings +# map_from_pairs is a custom filter plugin in role lib_utils openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}" openshift_logging_mux_cpu_limit: null openshift_logging_mux_cpu_request: 100m diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml index 1da053b4a..e87205bad 100644 --- a/roles/openshift_logging_mux/vars/main.yml +++ b/roles/openshift_logging_mux/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_mux_version: "3_8" -__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_mux_version: "3_9" +__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index eea1401b8..b12a6b346 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -181,6 +181,7 @@ - restart master api - set_fact: + # translate_idps is a custom filter in role lib_utils translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}" # TODO: add the validate parameter when there is a validation command to run diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml index f72710832..7870f43e2 100644 --- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml @@ -12,11 +12,11 @@ package: name={{ master_pkgs | join(',') }} state=present vars: master_pkgs: - - "{{ openshift_service_type }}{{ openshift_pkg_version }}" - - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" + - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}" + - "{{ 
openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}" + - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" register: result until: result is succeeded diff --git a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml index 8558bf3e9..995a5ab70 100644 --- a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml +++ b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml @@ -1,6 +1,8 @@ --- # Upgrade predicates - vars: + # openshift_master_facts_default_predicates is a custom lookup plugin in + # role lib_utils prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 649a4bc5d..ce27e238f 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -101,6 +101,7 @@ state: hard force: true with_items: + # certificates_to_synchronize is a custom filter in lib_utils - "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}" when: master_certs_missing | bool and inventory_hostname != openshift_ca_host delegate_to: "{{ openshift_ca_host }}" diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 85d0ac25c..f450c916a 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -57,6 +57,7 @@ access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}" auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}" identity_providers: "{{ openshift_master_identity_providers | default(None) }}" + # oo_htpasswd_users_from_file is a custom filter in role lib_utils htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}" manage_htpasswd: "{{ openshift_master_manage_htpasswd | default(true) }}" ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}" @@ -90,6 +91,8 @@ - name: Set Default scheduler predicates and priorities set_fact: + # openshift_master_facts_default_predicates is a custom lookup plugin in + # role lib_utils openshift_master_scheduler_default_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}" openshift_master_scheduler_default_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}" diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py deleted file mode 100644 index 6ed6d404c..000000000 --- a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use with openshift named certificates -''' - - 
-class FilterModule(object): - ''' Custom ansible filters for use with openshift named certificates''' - - @staticmethod - def oo_named_certificates_list(named_certificates): - ''' Returns named certificates list with correct fields for the master - config file.''' - return [{'certFile': named_certificate['certfile'], - 'keyFile': named_certificate['keyfile'], - 'names': named_certificate['names']} for named_certificate in named_certificates] - - def filters(self): - ''' returns a mapping of filters to methods ''' - return {"oo_named_certificates_list": self.oo_named_certificates_list} diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml index 91a358095..d4b47bb9e 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml @@ -12,7 +12,7 @@ until: result is succeeded vars: openshift_node_upgrade_rpm_list: - - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" - "PyYAML" - "dnsmasq" diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml index c9094e05a..ef5d8d662 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml @@ -14,6 +14,6 @@ until: result is succeeded vars: openshift_node_upgrade_rpm_list: - - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}" - "PyYAML" - "openvswitch" diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 index a829da34f..1e73c9e1c 100644 --- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -212,6 +212,9 @@ resources: host-type: { get_param: type } sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } +{% if openshift_openstack_dns_nameservers %} + openshift_hostname: { get_param: name } +{% endif %} scheduler_hints: { get_param: scheduler_hints } {% if use_trunk_ports|default(false)|bool %} diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml index 0b4dd7d1f..b1d9c8cca 100644 --- a/roles/openshift_persistent_volumes/tasks/main.yml +++ b/roles/openshift_persistent_volumes/tasks/main.yml @@ -26,7 +26,8 @@ when: openshift_hosted_registry_storage_glusterfs_swap | default(False) - name: create standard pv and pvc lists - # generate_pv_pvcs_list is a custom action module defined in ../action_plugins + # generate_pv_pvcs_list is a custom action module defined in + # roles/lib_utils/action_plugins/generate_pv_pvcs_list.py generate_pv_pvcs_list: {} register: l_pv_pvcs_list diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py index 72c47b8ee..14f1f72c2 100644 --- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py +++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py @@ -6,15 +6,6 @@ import re -# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and 
delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - def vars_with_pattern(source, pattern=""): ''' Returns a list of variables whose name matches the given pattern ''' if source == '': @@ -39,6 +30,5 @@ class FilterModule(object): def filters(self): ''' Returns the names of the filters provided by this class ''' return { - 'map_from_pairs': map_from_pairs, 'vars_with_pattern': vars_with_pattern } diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index e478023f8..72110b18c 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -59,11 +59,6 @@ src: "{{ generated_certs_dir }}/ca.crt" register: apiserver_ca -- shell: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" - register: get_apiservices - changed_when: no - - name: Create api service oc_obj: state: present @@ -86,4 +81,3 @@ caBundle: "{{ apiserver_ca.content }}" groupPriorityMinimum: 20 versionPriority: 10 - when: "'not found' in get_apiservices.stdout" diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index cfecaa12c..9b38a85c4 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -179,6 +179,8 @@ etcd_servers: "{{ openshift.master.etcd_urls | join(',') }}" etcd_cafile: "{{ '/etc/origin/master/master.etcd-ca.crt' if etcd_ca_crt.stat.exists else '/etc/origin/master/ca-bundle.crt' }}" node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}" + # apiserver_ca is defined in generate_certs.yml + ca_hash: "{{ apiserver_ca.content|hash('sha1') }}" - name: Set Service Catalog API Server daemonset oc_obj: diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 index 4f51b8c3c..e345df32c 100644 --- a/roles/openshift_service_catalog/templates/api_server.j2 +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -14,6 +14,8 @@ spec: type: RollingUpdate template: metadata: + annotations: + ca_hash: {{ ca_hash }} labels: app: apiserver spec: diff --git a/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml new file mode 100644 index 000000000..34af652c2 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml @@ -0,0 +1,133 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: deploy-heketi + labels: + glusterfs: heketi-template + deploy-heketi: support + annotations: + description: Bootstrap Heketi installation + tags: glusterfs,heketi,installation +objects: +- kind: Service + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-service + deploy-heketi: support + annotations: + description: Exposes Heketi service + spec: + ports: + - name: deploy-heketi-${CLUSTER_NAME} + port: 8080 + targetPort: 8080 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-route + deploy-heketi: support + spec: + to: + kind: 
Service + name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-dc + deploy-heketi: support + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + deploy-heketi: support + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: ${HEKETI_FSTAB} + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_FSTAB + displayName: heketi fstab path + description: Set the fstab path, file that is populated with bricks that heketi creates + value: /var/lib/heketi/fstab +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml new file mode 100644 index 000000000..064b51473 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml @@ -0,0 +1,67 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: gluster-s3-pvcs + labels: + glusterfs: s3-pvcs-template + gluster-s3: pvcs-template + annotations: + description: Gluster S3 service template + tags: glusterfs,heketi,gluster-s3 +objects: +- kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: "${PVC}" + labels: + glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage + gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc + annotations: + volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}" + spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: "${PVC_SIZE}" +- kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: "${META_PVC}" + 
labels: + glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage + gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc + annotations: + volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}" + spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: "${META_PVC_SIZE}" +parameters: +- name: S3_ACCOUNT + displayName: S3 Account Name + description: S3 storage account which will provide storage on GlusterFS volumes + required: true +- name: PVC + displayName: Primary GlusterFS-backed PVC + description: GlusterFS-backed PVC for object storage + required: true +- name: PVC_SIZE + displayName: Primary GlusterFS-backed PVC capacity + description: Capacity for GlusterFS-backed PVC for object storage + value: 2Gi +- name: META_PVC + displayName: Metadata GlusterFS-backed PVC + description: GlusterFS-backed PVC for object storage metadata + required: true +- name: META_PVC_SIZE + displayName: Metadata GlusterFS-backed PVC capacity + description: Capacity for GlusterFS-backed PVC for object storage metadata + value: 1Gi +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml new file mode 100644 index 000000000..896a1b226 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml @@ -0,0 +1,140 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: gluster-s3 + labels: + glusterfs: s3-template + gluster-s3: template + annotations: + description: Gluster S3 service template + tags: glusterfs,heketi,gluster-s3 +objects: +- kind: Service + apiVersion: v1 + metadata: + name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service + labels: + glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service + gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service + spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + selector: + glusterfs: s3-pod + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} +- kind: Route + apiVersion: v1 + metadata: + name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route + labels: + glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route + gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route + spec: + to: + kind: Service + name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc + labels: + glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc + gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc + annotations: + openshift.io/scc: privileged + description: Defines how to deploy gluster s3 object storage + spec: + replicas: 1 + selector: + glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod + template: + metadata: + name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3 + labels: + glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod + gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod + spec: + containers: + - name: gluster-s3 + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + ports: + - name: gluster + containerPort: 8080 + protocol: TCP + env: + - name: S3_ACCOUNT + value: "${S3_ACCOUNT}" + - name: S3_USER + value: "${S3_USER}" + - name: S3_PASSWORD + value: "${S3_PASSWORD}" + resources: {} + volumeMounts: + - name: gluster-vol1 + mountPath: "/mnt/gluster-object/${S3_ACCOUNT}" + - name: gluster-vol2 + mountPath: 
"/mnt/gluster-object/gsmetadata" + - name: glusterfs-cgroup + readOnly: true + mountPath: "/sys/fs/cgroup" + terminationMessagePath: "/dev/termination-log" + securityContext: + privileged: true + volumes: + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: gluster-vol1 + persistentVolumeClaim: + claimName: ${PVC} + - name: gluster-vol2 + persistentVolumeClaim: + claimName: ${META_PVC} + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + serviceAccountName: default + serviceAccount: default + securityContext: {} +parameters: +- name: IMAGE_NAME + displayName: glusterblock provisioner container image name + required: True +- name: IMAGE_VERSION + displayName: glusterblock provisioner container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage +- name: S3_ACCOUNT + displayName: S3 Account Name + description: S3 storage account which will provide storage on GlusterFS volumes + required: true +- name: S3_USER + displayName: S3 User + description: S3 user who can access the S3 storage account + required: true +- name: S3_PASSWORD + displayName: S3 User Password + description: Password for the S3 user + required: true +- name: PVC + displayName: Primary GlusterFS-backed PVC + description: GlusterFS-backed PVC for object storage + value: gluster-s3-claim +- name: META_PVC + displayName: Metadata GlusterFS-backed PVC + description: GlusterFS-backed PVC for object storage metadata + value: gluster-s3-meta-claim +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml new file mode 100644 index 000000000..63dd5cce6 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml @@ -0,0 +1,104 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterblock-provisioner + labels: + glusterfs: block-template + glusterblock: template + annotations: + description: glusterblock provisioner template + tags: glusterfs +objects: +- kind: ClusterRole + apiVersion: v1 + metadata: + name: glusterblock-provisioner-runner + labels: + glusterfs: block-provisioner-runner-clusterrole + glusterblock: provisioner-runner-clusterrole + rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "delete"] + - apiGroups: [""] + resources: ["routes"] + verbs: ["get", "list"] +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: glusterblock-${CLUSTER_NAME}-provisioner + labels: + glusterfs: block-${CLUSTER_NAME}-provisioner-sa + glusterblock: ${CLUSTER_NAME}-provisioner-sa +- apiVersion: v1 + kind: ClusterRoleBinding + metadata: + name: 
glusterblock-${CLUSTER_NAME}-provisioner + roleRef: + name: glusterblock-provisioner-runner + subjects: + - kind: ServiceAccount + name: glusterblock-${CLUSTER_NAME}-provisioner + namespace: ${NAMESPACE} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: glusterblock-${CLUSTER_NAME}-provisioner-dc + labels: + glusterfs: block-${CLUSTER_NAME}-provisioner-dc + glusterblock: ${CLUSTER_NAME}-provisioner-dc + annotations: + description: Defines how to deploy the glusterblock provisioner pod. + spec: + replicas: 1 + selector: + glusterfs: block-${CLUSTER_NAME}-provisioner-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: glusterblock-provisioner + labels: + glusterfs: block-${CLUSTER_NAME}-provisioner-pod + spec: + serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner + containers: + - name: glusterblock-provisioner + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: PROVISIONER_NAME + value: gluster.org/glusterblock +parameters: +- name: IMAGE_NAME + displayName: glusterblock provisioner container image name + required: True +- name: IMAGE_VERSION + displayName: glusterblock provisioner container image version + required: True +- name: NAMESPACE + displayName: glusterblock provisioner namespace + description: The namespace in which these resources are being created + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml new file mode 100644 index 000000000..09850a2c2 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml @@ -0,0 +1,154 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterfs + labels: + glusterfs: template + annotations: + description: GlusterFS DaemonSet template + tags: glusterfs +objects: +- kind: DaemonSet + apiVersion: extensions/v1beta1 + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-daemonset + annotations: + description: GlusterFS DaemonSet + tags: glusterfs + spec: + selector: + matchLabels: + glusterfs: ${CLUSTER_NAME}-pod + template: + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-pod + glusterfs-node: pod + spec: + nodeSelector: "${{NODE_LABELS}}" + hostNetwork: true + containers: + - name: glusterfs + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: GB_GLFS_LRU_COUNT + value: "${GB_GLFS_LRU_COUNT}" + - name: TCMU_LOGDIR + value: "${TCMU_LOGDIR}" + resources: + requests: + memory: 100Mi + cpu: 100m + volumeMounts: + - name: glusterfs-heketi + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc + mountPath: "/etc/glusterfs" + - name: glusterfs-logs + mountPath: "/var/log/glusterfs" + - name: glusterfs-config + mountPath: "/var/lib/glusterd" + - name: glusterfs-dev + mountPath: "/dev" + - name: glusterfs-misc + mountPath: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + mountPath: "/sys/fs/cgroup" + readOnly: true + - name: glusterfs-ssl + mountPath: "/etc/ssl" + readOnly: true + securityContext: + capabilities: {} + privileged: true + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + 
exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + terminationMessagePath: "/dev/termination-log" + volumes: + - name: glusterfs-heketi + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run + emptyDir: {} + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" + - name: glusterfs-etc + hostPath: + path: "/etc/glusterfs" + - name: glusterfs-logs + hostPath: + path: "/var/log/glusterfs" + - name: glusterfs-config + hostPath: + path: "/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: NODE_LABELS + displayName: Daemonset Node Labels + description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' + value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME + displayName: GlusterFS container image name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage +- name: GB_GLFS_LRU_COUNT + displayName: Maximum number of block hosting volumes + description: This value is to set maximum number of block hosting volumes. 
+ value: "15" + required: true +- name: TCMU_LOGDIR + displayName: Tcmu runner log directory + description: This value is to set tcmu runner log directory + value: "/var/log/glusterfs/gluster-block" + required: true diff --git a/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml new file mode 100644 index 000000000..28cdb2982 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-service + heketi: ${CLUSTER_NAME}-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: heketi-${CLUSTER_NAME}-route + heketi: ${CLUSTER_NAME}-route + spec: + to: + kind: Service + name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-dc + heketi: ${CLUSTER_NAME}-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-pod + heketi: ${CLUSTER_NAME}-pod + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: ${HEKETI_FSTAB} + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-db-${CLUSTER_NAME}-endpoints + path: heketidbstorage + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_FSTAB + displayName: heketi fstab path + description: Set the fstab path, file that is populated with bricks that heketi creates + value: /var/lib/heketi/fstab +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi 
container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py deleted file mode 100644 index a86c96df7..000000000 --- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py +++ /dev/null @@ -1,23 +0,0 @@ -''' - Openshift Storage GlusterFS class that provides useful filters used in GlusterFS -''' - - -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - -# pylint: disable=too-few-public-methods -class FilterModule(object): - ''' OpenShift Storage GlusterFS Filters ''' - - # pylint: disable=no-self-use, too-few-public-methods - def filters(self): - ''' Returns the names of the filters provided by this class ''' - return { - 'map_from_pairs': map_from_pairs - } diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index 2ea7286f3..a374df0ce 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -4,6 +4,7 @@ glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}" glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_name }}" + # map_from_pairs is a custom filter plugin in role lib_utils glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}" glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index b7cff6514..544a6f491 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -4,6 +4,7 @@ glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}" glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}" + # map_from_pairs is a custom filter plugin in role lib_utils glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}" glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}" diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml index 8ee95e36b..12916961b 100644 --- a/roles/openshift_web_console/tasks/install.yml +++ b/roles/openshift_web_console/tasks/install.yml @@ -18,6 +18,8 @@ oc_project: name: openshift-web-console state: present + node_selector: + - "" - name: Make temp directory for asset config files 
  command: mktemp -d /tmp/console-ansible-XXXXXX
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index 77afe1f43..662d65d9f 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,4 +1,4 @@
 ---
 __template_service_broker_prefix: "docker.io/openshift/"
 __template_service_broker_version: "latest"
-__template_service_broker_image_name: "origin"
+__template_service_broker_image_name: "origin-template-service-broker"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index dfab1e01b..16a08e72f 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
 ---
 __template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
 __template_service_broker_version: "v3.7"
-__template_service_broker_image_name: "ose"
+__template_service_broker_image_name: "ose-template-service-broker"
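
For reference, several hunks above delete per-role copies of the map_from_pairs filter (from openshift_sanitize_inventory and openshift_storage_glusterfs) and add comments pointing at role lib_utils instead. Below is a minimal sketch of the consolidated plugin, assuming the copy in roles/lib_utils/filter_plugins/ keeps the same body as the two deleted copies shown above; the exact file and class layout in lib_utils are assumptions, not quoted from this commit.

#!/usr/bin/python
# -*- coding: utf-8 -*-
''' map_from_pairs filter, consolidated into the lib_utils role
    (sketch; body mirrors the two per-role copies deleted above) '''


def map_from_pairs(source, delim="="):
    ''' Returns a dict parsed from a comma-separated string of
        delim-separated key/value pairs, e.g.
        "storagenode=glusterfs" -> {'storagenode': 'glusterfs'} '''
    if source == '':
        return dict()

    # Each comma-separated item must contain exactly one delimiter;
    # dict() raises ValueError otherwise, matching the old behavior.
    return dict(item.split(delim) for item in source.split(","))


class FilterModule(object):
    ''' lib_utils custom filters '''

    def filters(self):
        ''' Returns a mapping of filter names to methods '''
        return {'map_from_pairs': map_from_pairs}

With this in place, the glusterfs_config.yml and glusterfs_registry.yml hunks above still resolve a default such as 'storagenode=storage' through map_from_pairs to the node selector {'storagenode': 'storage'}; only the role that ships the filter changes.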