Diffstat (limited to 'roles')
179 files changed, 2056 insertions, 959 deletions
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml index 9eb9db316..fa982d533 100644 --- a/roles/ansible_service_broker/defaults/main.yml +++ b/roles/ansible_service_broker/defaults/main.yml @@ -6,6 +6,14 @@ ansible_service_broker_log_level: info ansible_service_broker_output_request: false ansible_service_broker_recovery: true ansible_service_broker_bootstrap_on_startup: true -# Recommended you do not enable this for now ansible_service_broker_dev_broker: false +ansible_service_broker_refresh_interval: 600s +# Recommended you do not enable this for now ansible_service_broker_launch_apb_on_bind: false + +ansible_service_broker_image_pull_policy: IfNotPresent +ansible_service_broker_sandbox_role: edit +ansible_service_broker_auto_escalate: true +ansible_service_broker_registry_tag: latest +ansible_service_broker_registry_whitelist: + - '.*-apb$' diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index b3797ef96..0f4b71124 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -17,16 +17,24 @@ ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}" ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}" + ansible_service_broker_registry_name: "{{ ansible_service_broker_registry_name | default(__ansible_service_broker_registry_name) }}" ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}" ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}" ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}" ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}" + ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/service-catalog" + - name: set ansible-service-broker image facts using set prefix and tag set_fact: ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}" ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}" +- slurp: + src: "{{ ansible_service_broker_certs_dir }}/ca.crt" + register: catalog_ca + + - include: validate_facts.yml @@ -42,53 +50,119 @@ namespace: openshift-ansible-service-broker state: present -- name: Set SA cluster-role +- name: create ansible-service-broker client serviceaccount + oc_serviceaccount: + name: asb-client + namespace: openshift-ansible-service-broker + state: present + +- name: Create asb-auth cluster role + oc_clusterrole: + state: present + name: asb-auth + rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["create", "delete"] + - apiGroups: ["authorization.openshift.io"] + resources: ["subjectrulesreview"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + +- name: Create asb-access cluster role + oc_clusterrole: + state: present + 
name: asb-access + rules: + - nonResourceURLs: ["/ansible-service-broker", "ansible-service-broker/*"] + verbs: ["get", "post", "put", "patch", "delete"] + +- name: Bind admin cluster-role to asb serviceaccount oc_adm_policy_user: state: present - namespace: "openshift-ansible-service-broker" + namespace: openshift-ansible-service-broker resource_kind: cluster-role resource_name: admin user: "system:serviceaccount:openshift-ansible-service-broker:asb" -- name: create ansible-service-broker service - oc_service: - name: asb +- name: Bind auth cluster role to asb service account + oc_adm_policy_user: + state: present namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-auth + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Bind asb-access role to asb-client service account + oc_adm_policy_user: state: present - labels: - app: openshift-ansible-service-broker - service: asb - ports: - - name: port-1338 - port: 1338 - selector: - app: openshift-ansible-service-broker - service: asb + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-access + user: "system:serviceaccount:openshift-ansible-service-broker:asb-client" -- name: create etcd service - oc_service: - name: etcd +- name: create asb-client token secret + oc_obj: + name: asb-client + state: present + kind: Secret + content: + path: /tmp/asbclientsecretout + data: + apiVersion: v1 + kind: Secret + metadata: + name: asb-client + annotations: + kubernetes.io/service-account.name: asb-client + type: kubernetes.io/service-account-token + +# Using oc_obj because oc_service doesn't seem to allow annotations +# TODO: Extend oc_service to allow annotations +- name: create ansible-service-broker service + oc_obj: + name: asb namespace: openshift-ansible-service-broker state: present - ports: - - name: etcd-advertise - port: 2379 - selector: - app: openshift-ansible-service-broker - service: etcd + kind: Service + content: + path: /tmp/asbsvcout + data: + apiVersion: v1 + kind: Service + metadata: + name: asb + labels: + app: openshift-ansible-service-broker + service: asb + annotations: + service.alpha.openshift.io/serving-cert-secret-name: asb-tls + spec: + ports: + - name: port-1338 + port: 1338 + targetPort: 1338 + protocol: TCP + selector: + app: openshift-ansible-service-broker + service: asb - name: create route for ansible-service-broker service oc_route: name: asb-1338 namespace: openshift-ansible-service-broker state: present + labels: + app: openshift-ansible-service-broker + service: asb service_name: asb port: 1338 - register: asb_route_out - -- name: get ansible-service-broker route name - set_fact: - ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}" + tls_termination: Reencrypt - name: create persistent volume claim for etcd oc_obj: @@ -97,7 +171,7 @@ state: present kind: PersistentVolumeClaim content: - path: /tmp/dcout + path: /tmp/pvcout data: apiVersion: v1 kind: PersistentVolumeClaim @@ -111,50 +185,61 @@ requests: storage: 1Gi -- name: create etcd deployment +- name: Create Ansible Service Broker deployment config oc_obj: - name: etcd + name: asb namespace: openshift-ansible-service-broker state: present - kind: Deployment + kind: DeploymentConfig content: path: /tmp/dcout data: - apiVersion: extensions/v1beta1 - kind: Deployment + apiVersion: v1 + kind: DeploymentConfig metadata: - name: etcd - namespace: openshift-ansible-service-broker + name: asb labels: app: 
openshift-ansible-service-broker - service: etcd + service: asb spec: + replicas: 1 selector: - matchLabels: - app: openshift-ansible-service-broker - service: etcd + app: openshift-ansible-service-broker strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - replicas: 1 + type: Rolling template: metadata: labels: app: openshift-ansible-service-broker - service: etcd + service: asb spec: - restartPolicy: Always + serviceAccount: asb containers: + - image: "{{ ansible_service_broker_image }}" + name: asb + imagePullPolicy: IfNotPresent + volumeMounts: + - name: config-volume + mountPath: /etc/ansible-service-broker + - name: asb-tls + mountPath: /etc/tls/private + ports: + - containerPort: 1338 + protocol: TCP + env: + - name: BROKER_CONFIG + value: /etc/ansible-service-broker/config.yaml + resources: {} + terminationMessagePath: /tmp/termination-log + - image: "{{ ansible_service_broker_etcd_image }}" name: etcd imagePullPolicy: IfNotPresent terminationMessagePath: /tmp/termination-log workingDir: /etcd args: - - '{{ ansible_service_broker_etcd_image_etcd_path }}' - - --data-dir=/data + - "{{ ansible_service_broker_etcd_image_etcd_path }}" + - "--data-dir=/data" - "--listen-client-urls=http://0.0.0.0:2379" - "--advertise-client-urls=http://0.0.0.0:2379" ports: @@ -170,57 +255,15 @@ - name: etcd persistentVolumeClaim: claimName: etcd - -- name: create ansible-service-broker deployment - oc_obj: - name: asb - namespace: openshift-ansible-service-broker - state: present - kind: Deployment - content: - path: /tmp/dcout - data: - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: asb - namespace: openshift-ansible-service-broker - labels: - app: openshift-ansible-service-broker - service: asb - spec: - strategy: - type: Recreate - replicas: 1 - template: - metadata: - labels: - app: openshift-ansible-service-broker - service: asb - spec: - serviceAccount: asb - restartPolicy: Always - containers: - - image: "{{ ansible_service_broker_image }}" - name: asb - imagePullPolicy: IfNotPresent - volumeMounts: - - name: config-volume - mountPath: /etc/ansible-service-broker - ports: - - containerPort: 1338 - protocol: TCP - env: - - name: BROKER_CONFIG - value: /etc/ansible-service-broker/config.yaml - terminationMessagePath: /tmp/termination-log - volumes: - name: config-volume configMap: name: broker-config items: - key: broker-config path: config.yaml + - name: asb-tls + secret: + secretName: asb-tls # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: @@ -239,42 +282,65 @@ name: broker-config namespace: openshift-ansible-service-broker labels: - app: ansible-service-broker + app: openshift-ansible-service-broker data: broker-config: | registry: - name: "{{ ansible_service_broker_registry_type }}" - url: "{{ ansible_service_broker_registry_url }}" - user: "{{ ansible_service_broker_registry_user }}" - pass: "{{ ansible_service_broker_registry_password }}" - org: "{{ ansible_service_broker_registry_organization }}" + - type: {{ ansible_service_broker_registry_type }} + name: {{ ansible_service_broker_registry_name }} + url: {{ ansible_service_broker_registry_url }} + user: {{ ansible_service_broker_registry_user }} + pass: {{ ansible_service_broker_registry_password }} + org: {{ ansible_service_broker_registry_organization }} + tag: {{ ansible_service_broker_registry_tag }} + white_list: {{ ansible_service_broker_registry_whitelist }} dao: - etcd_host: etcd + etcd_host: 0.0.0.0 etcd_port: 2379 log: 
logfile: /var/log/ansible-service-broker/asb.log stdout: true - level: "{{ ansible_service_broker_log_level }}" + level: {{ ansible_service_broker_log_level }} color: true - openshift: {} + openshift: + host: "" + ca_file: "" + bearer_token_file: "" + sandbox_role: {{ ansible_service_broker_sandbox_role }} + image_pull_policy: {{ ansible_service_broker_image_pull_policy }} broker: dev_broker: {{ ansible_service_broker_dev_broker | bool | lower }} + bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }} + refresh_interval: {{ ansible_service_broker_refresh_interval }} launch_apb_on_bind: {{ ansible_service_broker_launch_apb_on_bind | bool | lower }} - recovery: {{ ansible_service_broker_recovery | bool | lower }} output_request: {{ ansible_service_broker_output_request | bool | lower }} - bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }} + recovery: {{ ansible_service_broker_recovery | bool | lower }} + ssl_cert_key: /etc/tls/private/tls.key + ssl_cert: /etc/tls/private/tls.crt + auto_escalate: {{ ansible_service_broker_auto_escalate }} + auth: + - type: basic + enabled: false + - name: Create the Broker resource in the catalog oc_obj: name: ansible-service-broker state: present - kind: Broker + kind: ServiceBroker content: path: /tmp/brokerout data: apiVersion: servicecatalog.k8s.io/v1alpha1 - kind: Broker + kind: ServiceBroker metadata: name: ansible-service-broker spec: - url: http://asb.openshift-ansible-service-broker.svc:1338 + url: http://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker + authInfo: + bearer: + secretRef: + name: asb-client + namespace: openshift-ansible-service-broker + kind: Secret + caBundle: "{{ catalog_ca.content }}" diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml index 2519f9f4c..f0a6be226 100644 --- a/roles/ansible_service_broker/tasks/remove.yml +++ b/roles/ansible_service_broker/tasks/remove.yml @@ -1,16 +1,57 @@ --- -- name: remove openshift-ansible-service-broker project - oc_project: - name: openshift-ansible-service-broker - state: absent - - name: remove ansible-service-broker serviceaccount oc_serviceaccount: name: asb namespace: openshift-ansible-service-broker state: absent +- name: remove ansible-service-broker client serviceaccount + oc_serviceaccount: + name: asb-client + namespace: openshift-ansible-service-broker + state: absent + +- name: remove asb-auth cluster role + oc_clusterrole: + state: absent + name: asb-auth + +- name: remove asb-access cluster role + oc_clusterrole: + state: absent + name: asb-access + +- name: Unbind admin cluster-role to asb serviceaccount + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: admin + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Unbind auth cluster role to asb service account + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-auth + user: "system:serviceaccount:openshift-ansible-service-broker:asb" + +- name: Unbind asb-access role to asb-client service account + oc_adm_policy_user: + state: absent + namespace: openshift-ansible-service-broker + resource_kind: cluster-role + resource_name: asb-access + user: "system:serviceaccount:openshift-ansible-service-broker:asb-client" + +- name: remove asb-client token secret + oc_secret: + state: absent + name: asb-client + 
namespace: openshift-ansible-service-broker + - name: remove ansible-service-broker service oc_service: name: asb @@ -35,19 +76,19 @@ namespace: openshift-ansible-service-broker state: absent -- name: remove etcd deployment +- name: remove Ansible Service Broker deployment config oc_obj: - name: etcd + name: asb namespace: openshift-ansible-service-broker + kind: DeploymentConfig state: absent - kind: Deployment -- name: remove ansible-service-broker deployment +- name: remove secret for broker auth oc_obj: - name: asb + name: asb-auth-secret namespace: openshift-ansible-service-broker + kind: Broker state: absent - kind: Deployment # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: - name: remove config map for ansible-service-broker @@ -62,4 +103,9 @@ oc_obj: name: ansible-service-broker state: absent - kind: Broker + kind: ServiceBroker + +- name: remove openshift-ansible-service-broker project + oc_project: + name: openshift-ansible-service-broker + state: absent diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml index 15e448515..3e9639adf 100644 --- a/roles/ansible_service_broker/vars/default_images.yml +++ b/roles/ansible_service_broker/vars/default_images.yml @@ -8,6 +8,7 @@ __ansible_service_broker_etcd_image_tag: latest __ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd __ansible_service_broker_registry_type: dockerhub +__ansible_service_broker_registry_name: dh __ansible_service_broker_registry_url: null __ansible_service_broker_registry_user: null __ansible_service_broker_registry_password: null diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml index ce2ae8365..9c576cb76 100644 --- a/roles/ansible_service_broker/vars/openshift-enterprise.yml +++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml @@ -7,7 +7,9 @@ __ansible_service_broker_etcd_image_prefix: rhel7/ __ansible_service_broker_etcd_image_tag: latest __ansible_service_broker_etcd_image_etcd_path: /bin/etcd + __ansible_service_broker_registry_type: rhcc +__ansible_service_broker_registry_name: rh __ansible_service_broker_registry_url: "https://registry.access.redhat.com" __ansible_service_broker_registry_user: null __ansible_service_broker_registry_password: null diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index 274fd8603..e36dfa7b9 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -9,6 +9,8 @@ openshift_docker_additional_registries: [] openshift_docker_blocked_registries: [] openshift_docker_insecure_registries: [] +openshift_docker_ent_reg: 'registry.access.redhat.com' + # The l2_docker_* variables convert csv strings to lists, if # necessary. 
These variables should be used in place of their respective # openshift_docker_* counterparts to ensure the properly formatted lists are diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 7ece0e061..dae17c3ce 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -10,20 +10,28 @@ l_use_crio: "{{ openshift_use_crio | default(False) }}" l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}" +- when: + - openshift_deployment_type == 'openshift-enterprise' + assert: + that: + - "openshift_image_tag is defined" + msg: > + openshift_image_tag is a required inventory variable when installing openshift-enterprise + - name: Use Package Docker if Requested include: package_docker.yml when: - - not l_use_system_container - - not l_use_crio_only + - not l_use_system_container + - not l_use_crio_only - name: Use System Container Docker if Requested include: systemcontainer_docker.yml when: - - l_use_system_container - - not l_use_crio_only + - l_use_system_container + - not l_use_crio_only - name: Add CRI-O usage Requested include: systemcontainer_crio.yml when: - - l_use_crio - - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] + - l_use_crio + - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 0c5621259..a35335937 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -50,22 +50,40 @@ src: custom.conf.j2 when: not os_firewall_use_firewalld | default(False) | bool +- name: Add enterprise registry, if necessary + set_fact: + l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}" + when: + - openshift.common.deployment_type == 'openshift-enterprise' + - openshift_docker_ent_reg != '' + - openshift_docker_ent_reg not in l2_docker_additional_registries + - stat: path=/etc/sysconfig/docker register: docker_check -- name: Comment old registry params in /etc/sysconfig/docker +- name: Set registry params lineinfile: dest: /etc/sysconfig/docker regexp: '^{{ item.reg_conf_var }}=.*$' - line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}" + line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" + when: + - item.reg_fact_val != [] + - docker_check.stat.isreg is defined + - docker_check.stat.isreg with_items: - reg_conf_var: ADD_REGISTRY + reg_fact_val: "{{ l2_docker_additional_registries }}" + reg_flag: --add-registry - reg_conf_var: BLOCK_REGISTRY + reg_fact_val: "{{ l2_docker_blocked_registries }}" + reg_flag: --block-registry - reg_conf_var: INSECURE_REGISTRY + reg_fact_val: "{{ l2_docker_insecure_registries }}" + reg_flag: --insecure-registry notify: - restart docker -- name: Place additional/blocked/insecure registies in /etc/containers/registries.conf +- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf template: dest: "{{ containers_registries_conf_path }}" src: registries.conf diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 5b02b72be..386369d26 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -1,17 +1,18 @@ --- + # TODO: Much of this file is shared with container engine tasks - set_fact: - 
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}" - when: openshift.docker.insecure_registries + l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" + when: l2_docker_insecure_registries - set_fact: - l_crio_registries: "{{ openshift.docker.additional_registries + ['docker.io'] }}" - when: openshift.docker.additional_registries + l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" + when: l2_docker_additional_registries - set_fact: l_crio_registries: "{{ ['docker.io'] }}" - when: not openshift.docker.additional_registries + when: not l2_docker_additional_registries - set_fact: l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" - when: openshift.docker.additional_registries + when: l2_docker_additional_registries - name: Ensure container-selinux is installed package: @@ -92,16 +93,24 @@ - block: - - name: Set to default prepend + - name: Set CRI-O image defaults set_fact: l_crio_image_prepend: "docker.io/gscrivano" l_crio_image_name: "cri-o-fedora" + l_crio_image_tag: "latest" - name: Use Centos based image when distribution is CentOS set_fact: l_crio_image_name: "cri-o-centos" when: ansible_distribution == "CentOS" + - name: Set CRI-O image tag + set_fact: + l_crio_image_tag: "{{ openshift_image_tag }}" + when: + - openshift_deployment_type == 'openshift-enterprise' + - openshift_image_tag is defined + - name: Use RHEL based image when distribution is Red Hat set_fact: l_crio_image_prepend: "registry.access.redhat.com/openshift3" @@ -110,7 +119,7 @@ - name: Set the full image name set_fact: - l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest" + l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}" # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548 - name: Use a specific image if requested diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 146e5f430..5b1605b58 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -1,4 +1,5 @@ --- + # If docker_options are provided we should fail. We should not install docker and ignore # the users configuration. 
NOTE: docker_options == inventory:openshift_docker_options - name: Fail quickly if openshift_docker_options are set @@ -89,6 +90,14 @@ - name: Set to default prepend set_fact: l_docker_image_prepend: "gscrivano" + l_docker_image_tag: "latest" + + - name: Set container engine image tag + set_fact: + l_docker_image_tag: "{{ openshift_image_tag }}" + when: + - openshift_deployment_type == 'openshift-enterprise' + - openshift_image_tag is defined - name: Use Red Hat Registry for image when distribution is Red Hat set_fact: @@ -102,7 +111,7 @@ - name: Set the full image name set_fact: - l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest" + l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}" # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959 - name: Use a specific image if requested @@ -148,10 +157,10 @@ # Set local versions of facts that must be in json format for container-daemon.json # NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson - set_fact: - l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}" + l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}" l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}" - l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}" - l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}" + l_docker_additional_registries: "{{ l2_docker_additional_registries | default([]) | to_json }}" + l_docker_blocked_registries: "{{ l2_docker_blocked_registries | default([]) | to_json }}" l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}" # Configure container-engine using the container-daemon.json file diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf index c55dbd84f..d379b2be0 100644 --- a/roles/docker/templates/registries.conf +++ b/roles/docker/templates/registries.conf @@ -6,7 +6,7 @@ # The default location for this configuration file is /etc/containers/registries.conf. -# The only valid categories are: 'registries', 'insecure_registies', +# The only valid categories are: 'registries', 'insecure_registries', # and 'block_registries'. 
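For context on the docker registry changes above, here is a minimal sketch of what the reworked "Set registry params" task would write to /etc/sysconfig/docker on an openshift-enterprise host. This is an illustration only, not part of the commit: the inventory values and the mirror hostname are placeholders, and the exact rendering depends on the oo_prepend_strings_in_list filter plus the new openshift_docker_ent_reg default being appended to l2_docker_additional_registries.

```
# Hypothetical inventory fragment (placeholder values):
#   openshift_docker_additional_registries=['mirror.example.com:5000']
#   openshift_docker_insecure_registries=['mirror.example.com:5000']
#
# Expected /etc/sysconfig/docker entries after the lineinfile task runs;
# the enterprise registry lands last because it is appended to the list:
ADD_REGISTRY='--add-registry mirror.example.com:5000 --add-registry registry.access.redhat.com'
INSECURE_REGISTRY='--insecure-registry mirror.example.com:5000'
# BLOCK_REGISTRY is skipped because l2_docker_blocked_registries is empty.
```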
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 3cc2bbb18..18164050a 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -1,6 +1,66 @@ --- -r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" -r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(Falsel) }}" +r_etcd_common_backup_tag: '' +r_etcd_common_backup_sufix_name: '' + +# runc, docker, host +r_etcd_common_etcd_runtime: "docker" +r_etcd_common_embedded_etcd: false + +# etcd run on a host => use etcdctl command directly +# etcd run as a docker container => use docker exec +# etcd run as a runc container => use runc exec +r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}" + +# etcd server vars +etcd_conf_dir: '/etc/etcd' +r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd +etcd_system_container_conf_dir: /var/lib/etcd/etc +etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf" +etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" +etcd_cert_file: "{{ etcd_conf_dir }}/server.crt" +etcd_key_file: "{{ etcd_conf_dir }}/server.key" +etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt" +etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt" +etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key" + +# etcd ca vars +etcd_ca_dir: "{{ etcd_conf_dir}}/ca" +etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs" +etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt" +etcd_ca_key: "{{ etcd_ca_dir }}/ca.key" +etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf" +etcd_ca_name: etcd_ca +etcd_req_ext: etcd_v3_req +etcd_ca_exts_peer: etcd_v3_ca_peer +etcd_ca_exts_server: etcd_v3_ca_server +etcd_ca_exts_self: etcd_v3_ca_self +etcd_ca_exts_client: etcd_v3_ca_client +etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl" +etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs" +etcd_ca_db: "{{ etcd_ca_dir }}/index.txt" +etcd_ca_serial: "{{ etcd_ca_dir }}/serial" +etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber" +etcd_ca_default_days: 1825 + +r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt +r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key +r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt + +# etcd server & certificate vars +etcd_hostname: "{{ inventory_hostname }}" +etcd_ip: "{{ ansible_default_ipv4.address }}" +etcd_is_atomic: False +etcd_is_containerized: False +etcd_is_thirdparty: False + +# etcd dir vars +etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" + +# etcd ports and protocols +etcd_client_port: 2379 +etcd_peer_port: 2380 +etcd_url_scheme: http +etcd_peer_url_scheme: http etcd_initial_cluster_state: new etcd_initial_cluster_token: etcd-cluster-1 @@ -10,8 +70,15 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" -etcd_client_port: 2379 -etcd_peer_port: 2380 +etcd_peer: 127.0.0.1 +etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}" + +etcd_service: "{{ 
'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}" +# Location of the service file is fixed and not meant to be changed +etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service" + +r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(Falsel) }}" etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" r_etcd_os_firewall_deny: [] @@ -20,3 +87,6 @@ r_etcd_os_firewall_allow: port: "{{etcd_client_port}}/tcp" - service: etcd peering port: "{{ etcd_peer_port }}/tcp" + +# set the backend quota to 4GB by default +etcd_quota_backend_bytes: 4294967296 diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd/library/delegated_serial_command.py index 0cab1ca88..0cab1ca88 100755 --- a/roles/etcd_common/library/delegated_serial_command.py +++ b/roles/etcd/library/delegated_serial_command.py diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index 537c01c0e..879ca4f4e 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -19,4 +19,3 @@ dependencies: - role: lib_openshift - role: lib_os_firewall - role: lib_utils -- role: etcd_common diff --git a/roles/etcd_common/tasks/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml index 6cb456677..11bd2310e 100644 --- a/roles/etcd_common/tasks/drop_etcdctl.yml +++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml @@ -3,7 +3,7 @@ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not openshift.common.is_atomic | bool -- name: Configure etcd profile.d alises +- name: Configure etcd profile.d aliases template: dest: "/etc/profile.d/etcdctl.sh" src: etcdctl.sh.j2 diff --git a/roles/etcd/tasks/backup.yml b/roles/etcd/tasks/backup.yml new file mode 100644 index 000000000..c0538e596 --- /dev/null +++ b/roles/etcd/tasks/backup.yml @@ -0,0 +1,2 @@ +--- +- include: backup/backup.yml diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd/tasks/backup/backup.yml index 42d27c081..42d27c081 100644 --- a/roles/etcd_common/tasks/backup.yml +++ b/roles/etcd/tasks/backup/backup.yml diff --git a/roles/etcd/tasks/backup_ca_certificates.yml b/roles/etcd/tasks/backup_ca_certificates.yml new file mode 100644 index 000000000..a41b032f3 --- /dev/null +++ b/roles/etcd/tasks/backup_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_ca_certificates.yml diff --git a/roles/etcd/tasks/backup_generated_certificates.yml b/roles/etcd/tasks/backup_generated_certificates.yml new file mode 100644 index 000000000..8cf2a10cc --- /dev/null +++ b/roles/etcd/tasks/backup_generated_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_generated_certificates.yml diff --git a/roles/etcd/tasks/backup_server_certificates.yml b/roles/etcd/tasks/backup_server_certificates.yml new file mode 100644 index 000000000..267ffeb4d --- /dev/null +++ b/roles/etcd/tasks/backup_server_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_server_certificates.yml diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml index 7cda49069..cca1e9ad7 100644 --- a/roles/etcd/tasks/ca.yml +++ b/roles/etcd/tasks/ca.yml @@ -1,2 +1,2 @@ --- -- include: ca/deploy.yml +- include: certificates/deploy_ca.yml diff --git a/roles/etcd/tasks/certificates/backup_ca_certificates.yml b/roles/etcd/tasks/certificates/backup_ca_certificates.yml new file mode 100644 index 000000000..f60eb82ef --- /dev/null +++ 
b/roles/etcd/tasks/certificates/backup_ca_certificates.yml @@ -0,0 +1,12 @@ +--- +- name: Determine if CA certificate directory exists + stat: + path: "{{ etcd_ca_dir }}" + register: etcd_ca_certs_dir_stat +- name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_ca_dir }} + args: + warn: no + when: etcd_ca_certs_dir_stat.stat.exists | bool diff --git a/roles/etcd/tasks/certificates/backup_generated_certificates.yml b/roles/etcd/tasks/certificates/backup_generated_certificates.yml new file mode 100644 index 000000000..6a24cfcb3 --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_generated_certificates.yml @@ -0,0 +1,13 @@ +--- +- name: Determine if generated etcd certificates exist + stat: + path: "{{ etcd_conf_dir }}/generated_certs" + register: etcd_generated_certs_dir_stat + +- name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_conf_dir }}/generated_certs + args: + warn: no + when: etcd_generated_certs_dir_stat.stat.exists | bool diff --git a/roles/etcd/tasks/certificates/backup_server_certificates.yml b/roles/etcd/tasks/certificates/backup_server_certificates.yml new file mode 100644 index 000000000..8e6cc6965 --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_server_certificates.yml @@ -0,0 +1,11 @@ +--- +- name: Backup etcd certificates + command: > + tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_conf_dir }}/ca.crt + {{ etcd_conf_dir }}/server.crt + {{ etcd_conf_dir }}/server.key + {{ etcd_conf_dir }}/peer.crt + {{ etcd_conf_dir }}/peer.key + args: + warn: no diff --git a/roles/etcd/tasks/ca/deploy.yml b/roles/etcd/tasks/certificates/deploy_ca.yml index 3d32290a2..3d32290a2 100644 --- a/roles/etcd/tasks/ca/deploy.yml +++ b/roles/etcd/tasks/certificates/deploy_ca.yml diff --git a/roles/etcd/tasks/certificates/distribute_ca.yml b/roles/etcd/tasks/certificates/distribute_ca.yml new file mode 100644 index 000000000..632ac15dd --- /dev/null +++ b/roles/etcd/tasks/certificates/distribute_ca.yml @@ -0,0 +1,47 @@ +--- +- name: Create a tarball of the etcd ca certs + command: > + tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz + -C {{ etcd_ca_dir }} . 
+ args: + creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + warn: no + delegate_to: "{{ etcd_ca_host }}" + run_once: true + +- name: Retrieve etcd ca cert tarball + fetch: + src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_sync_cert_dir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + delegate_to: "{{ etcd_ca_host }}" + run_once: true + +- name: Ensure ca directory exists + file: + path: "{{ etcd_ca_dir }}" + state: directory + +- name: Unarchive etcd ca cert tarballs + unarchive: + src: "{{ etcd_sync_cert_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_ca_dir }}" + +- name: Read current etcd CA + slurp: + src: "{{ etcd_conf_dir }}/ca.crt" + register: g_current_etcd_ca_output + +- name: Read new etcd CA + slurp: + src: "{{ etcd_ca_dir }}/ca.crt" + register: g_new_etcd_ca_output + +- copy: + content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" + dest: "{{ item }}/ca.crt" + with_items: + - "{{ etcd_conf_dir }}" + - "{{ etcd_ca_dir }}" diff --git a/roles/etcd/tasks/client_certificates/fetch_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index 119071a72..119071a72 100644 --- a/roles/etcd/tasks/client_certificates/fetch_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml diff --git a/roles/etcd/tasks/server_certificates/fetch_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 064fe1952..26492fb3c 100644 --- a/roles/etcd/tasks/server_certificates/fetch_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -1,8 +1,4 @@ --- -- include: ../ca/deploy.yml - when: - - etcd_ca_setup | default(True) | bool - - name: Install etcd package: name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}" diff --git a/roles/etcd/tasks/certificates/remove_ca_certificates.yml b/roles/etcd/tasks/certificates/remove_ca_certificates.yml new file mode 100644 index 000000000..4a86eb60d --- /dev/null +++ b/roles/etcd/tasks/certificates/remove_ca_certificates.yml @@ -0,0 +1,5 @@ +--- +- name: Remove CA certificate directory + file: + path: "{{ etcd_ca_dir }}" + state: absent diff --git a/roles/etcd/tasks/certificates/remove_generated_certificates.yml b/roles/etcd/tasks/certificates/remove_generated_certificates.yml new file mode 100644 index 000000000..993b18de2 --- /dev/null +++ b/roles/etcd/tasks/certificates/remove_generated_certificates.yml @@ -0,0 +1,5 @@ +--- +- name: Remove generated etcd certificates + file: + path: "{{ etcd_conf_dir }}/generated_certs" + state: absent diff --git a/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml new file mode 100644 index 000000000..70b5c6523 --- /dev/null +++ b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml @@ -0,0 +1,8 @@ +--- +- name: Retrieve etcd CA certificate + fetch: + src: "{{ etcd_conf_dir }}/ca.crt" + dest: "{{ etcd_sync_cert_dir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml index 2e9c078b9..2f4108a0d 100644 --- a/roles/etcd/tasks/client_certificates.yml +++ b/roles/etcd/tasks/client_certificates.yml @@ -1,2 +1,2 @@ --- -- include: client_certificates/fetch_from_ca.yml +- include: certificates/fetch_client_certificates_from_ca.yml diff --git a/roles/etcd/tasks/distribute_ca b/roles/etcd/tasks/distribute_ca new file mode 100644 
index 000000000..040c5f7af --- /dev/null +++ b/roles/etcd/tasks/distribute_ca @@ -0,0 +1,2 @@ +--- +- include: certificates/distribute_ca.yml diff --git a/roles/etcd/tasks/drop_etcdctl.yml b/roles/etcd/tasks/drop_etcdctl.yml new file mode 100644 index 000000000..4c1f609f7 --- /dev/null +++ b/roles/etcd/tasks/drop_etcdctl.yml @@ -0,0 +1,2 @@ +--- +- include: auxiliary/drop_etcdctl.yml diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 870c11ad4..3e69af314 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,6 +1,4 @@ --- -- include: server_certificates.yml - - name: Set hostname and ip facts set_fact: # Store etcd_hostname and etcd_ip such that they will be available @@ -16,10 +14,7 @@ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not etcd_is_containerized | bool -- include_role: - name: etcd_common - vars: - r_etcd_common_action: drop_etcdctl +- include: drop_etcdctl.yml when: - openshift_etcd_etcdctl_profile | default(true) | bool diff --git a/roles/etcd/tasks/remove_ca_certificates.yml b/roles/etcd/tasks/remove_ca_certificates.yml new file mode 100644 index 000000000..36df1a1cc --- /dev/null +++ b/roles/etcd/tasks/remove_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/remove_ca_certificates.yml diff --git a/roles/etcd/tasks/remove_generated_certificates.yml b/roles/etcd/tasks/remove_generated_certificates.yml new file mode 100644 index 000000000..b10a4b32d --- /dev/null +++ b/roles/etcd/tasks/remove_generated_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/remove_generated_certificates.yml diff --git a/roles/etcd/tasks/retrieve_ca_certificates.yml b/roles/etcd/tasks/retrieve_ca_certificates.yml new file mode 100644 index 000000000..bd6c4ec85 --- /dev/null +++ b/roles/etcd/tasks/retrieve_ca_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/retrieve_ca_certificates.yml diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml index f0ba58b6e..ae26079f9 100644 --- a/roles/etcd/tasks/server_certificates.yml +++ b/roles/etcd/tasks/server_certificates.yml @@ -1,2 +1,6 @@ --- -- include: server_certificates/fetch_from_ca.yml +- include: ca.yml + when: + - etcd_ca_setup | default(True) | bool + +- include: certificates/fetch_server_certificates_from_ca.yml diff --git a/roles/etcd/tasks/upgrade/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml index cea95a1b3..24071f9ad 100644 --- a/roles/etcd/tasks/upgrade/upgrade_image.yml +++ b/roles/etcd/tasks/upgrade/upgrade_image.yml @@ -20,6 +20,11 @@ regexp: "{{ current_image.stdout }}$" replace: "{{ new_etcd_image }}" +- lineinfile: + destfile: "{{ etcd_conf_file }}" + regexp: '^ETCD_QUOTA_BACKEND_BYTES=' + line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}" + - name: Restart etcd_container systemd: name: "{{ etcd_service }}" diff --git a/roles/etcd/tasks/upgrade/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml index 324b69605..505e28afb 100644 --- a/roles/etcd/tasks/upgrade/upgrade_rpm.yml +++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml @@ -19,6 +19,11 @@ name: "{{ l_etcd_target_package }}" state: latest +- lineinfile: + destfile: "{{ etcd_conf_file }}" + regexp: '^ETCD_QUOTA_BACKEND_BYTES=' + line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}" + - name: Restart etcd service: name: "{{ etcd_service }}" diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 2c2803aee..8462bb4c8 100644 --- 
a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -45,6 +45,7 @@ ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} #ETCD_STRICT_RECONFIG_CHECK="false" #ETCD_AUTO_COMPACTION_RETENTION="0" #ETCD_ENABLE_V2="true" +ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }} #[proxy] #ETCD_PROXY=off diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd/templates/etcdctl.sh.j2 index ac7d9c72f..ac7d9c72f 100644 --- a/roles/etcd_common/templates/etcdctl.sh.j2 +++ b/roles/etcd/templates/etcdctl.sh.j2 diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md deleted file mode 100644 index d1c3a6602..000000000 --- a/roles/etcd_common/README.md +++ /dev/null @@ -1,53 +0,0 @@ -etcd_common -======================== - -Common resources for dependent etcd roles. E.g. default variables for: -* config directories -* certificates -* ports -* other settings - -Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g. - -```yaml -- delegated_serial_command: - command: /usr/bin/make_database.sh arg1 arg2 - creates: /path/to/database -``` - -Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example). - -Dependencies ------------- - -openshift-repos - -Example Playbook ----------------- - -**Drop etcdctl aliases** - -```yaml -- include_role: - name: etcd_common - tasks_from: etcdctl -``` - -**Get access to common variables** - -```yaml -# meta.yml of etcd -... -dependencies: -- { role: etcd_common } -``` - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml deleted file mode 100644 index 6705e1ac5..000000000 --- a/roles/etcd_common/defaults/main.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -# Default action when calling this role -r_etcd_common_action: noop -r_etcd_common_backup_tag: '' -r_etcd_common_backup_sufix_name: '' - -# runc, docker, host -r_etcd_common_etcd_runtime: "docker" -r_etcd_common_embedded_etcd: false - -# etcd run on a host => use etcdctl command directly -# etcd run as a docker container => use docker exec -# etcd run as a runc container => use runc exec -r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}" - -# etcd server vars -etcd_conf_dir: '/etc/etcd' -r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd -etcd_system_container_conf_dir: /var/lib/etcd/etc -etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf" -etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" -etcd_cert_file: "{{ etcd_conf_dir }}/server.crt" -etcd_key_file: "{{ etcd_conf_dir }}/server.key" -etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt" -etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt" -etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key" - -# etcd ca vars -etcd_ca_dir: "{{ etcd_conf_dir}}/ca" -etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs" -etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt" -etcd_ca_key: "{{ etcd_ca_dir }}/ca.key" -etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf" -etcd_ca_name: etcd_ca -etcd_req_ext: etcd_v3_req -etcd_ca_exts_peer: etcd_v3_ca_peer -etcd_ca_exts_server: etcd_v3_ca_server -etcd_ca_exts_self: etcd_v3_ca_self -etcd_ca_exts_client: etcd_v3_ca_client -etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl" -etcd_ca_new_certs_dir: "{{ 
etcd_ca_dir }}/certs" -etcd_ca_db: "{{ etcd_ca_dir }}/index.txt" -etcd_ca_serial: "{{ etcd_ca_dir }}/serial" -etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber" -etcd_ca_default_days: 1825 - -r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt -r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key -r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt - -# etcd server & certificate vars -etcd_hostname: "{{ inventory_hostname }}" -etcd_ip: "{{ ansible_default_ipv4.address }}" -etcd_is_atomic: False -etcd_is_containerized: False -etcd_is_thirdparty: False - -# etcd dir vars -etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" - -# etcd ports and protocols -etcd_client_port: 2379 -etcd_peer_port: 2380 -etcd_url_scheme: http -etcd_peer_url_scheme: http - -etcd_initial_cluster_state: new -etcd_initial_cluster_token: etcd-cluster-1 - -etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}" -etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}" -etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" -etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" - -etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" - -# etcd_peer needs to be set by a role caller -etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}" diff --git a/roles/etcd_common/meta/main.yml b/roles/etcd_common/meta/main.yml deleted file mode 100644 index dfb1c7a2c..000000000 --- a/roles/etcd_common/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: [] diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml deleted file mode 100644 index 6ed87e6c7..000000000 --- a/roles/etcd_common/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Fail if invalid r_etcd_common_action provided - fail: - msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'" - when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl'] - -- name: Include main action task file - include: "{{ r_etcd_common_action }}.yml" - when: r_etcd_common_action != "noop" diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml deleted file mode 100644 index a88d78235..000000000 --- a/roles/etcd_common/tasks/noop.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -# This is file is here because the usage of tags, specifically `pre_upgrade` -# breaks the functionality of this role. 
-# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025 diff --git a/roles/etcd_common/vars/main.yml b/roles/etcd_common/vars/main.yml deleted file mode 100644 index 00d697776..000000000 --- a/roles/etcd_common/vars/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}" -# Location of the service file is fixed and not meant to be changed -etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service" diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md new file mode 100644 index 000000000..321acca21 --- /dev/null +++ b/roles/installer_checkpoint/README.md @@ -0,0 +1,177 @@ +OpenShift-Ansible Installer Checkpoint +====================================== + +A complete OpenShift cluster installation is comprised of many different +components which can take 30 minutes to several hours to complete. If the +installation should fail, it could be confusing to understand at which component +the failure occurred. Additionally, it may be desired to re-run only the +component which failed instead of starting over from the beginning. Components +which came after the failed component would also need to be run individually. + +Design +------ + +The Installer Checkpoint implements an Ansible callback plugin to allow +displaying and logging of the installer status at the end of a playbook run. + +To ensure the callback plugin is loaded, regardless of ansible.cfg file +configuration, the plugin has been placed inside the installer_checkpoint role +which must be called early in playbook execution. The `std_include.yml` playbook +is run first for all entry point playbooks, therefore, the initialization of the +checkpoint plugin has been placed at the beginning of that file. + +Playbooks use the [set_stats][set_stats] Ansible module to set a custom stats +variable indicating the status of the phase being executed. + +The installer_checkpoint.py callback plugin extends the Ansible +`v2_playbook_on_stats` method, which is called at the end of a playbook run, to +display the status of each phase which was run. The INSTALLER STATUS report is +displayed immediately following the PLAY RECAP. + +Phases of cluster installation are mapped to the steps in the +[common/openshift-cluster/config.yml][openshift_cluster_config] playbook. + +To correctly display the order of the installer phases, the `installer_phases` +variable defines the phase or component order. + +```python + # Set the order of the installer phases + installer_phases = [ + 'installer_phase_initialize', + 'installer_phase_etcd', + 'installer_phase_nfs', + 'installer_phase_loadbalancer', + 'installer_phase_master', + 'installer_phase_master_additional', + 'installer_phase_node', + 'installer_phase_glusterfs', + 'installer_phase_hosted', + 'installer_phase_metrics', + 'installer_phase_logging', + 'installer_phase_servicecatalog', + ] +``` + +Additional attributes, such as display title and component playbook, of each +phase are stored in the `phase_attributes` variable. + +```python + # Define the attributes of the installer phases + phase_attributes = { + 'installer_phase_initialize': { + 'title': 'Initialization', + 'playbook': '' + }, + 'installer_phase_etcd': { + 'title': 'etcd Install', + 'playbook': 'playbooks/byo/openshift-etcd/config.yml' + }, + 'installer_phase_nfs': { + 'title': 'NFS Install', + 'playbook': 'playbooks/byo/openshift-nfs/config.yml' + }, + #... 
+ } +``` + +Usage +----- + +In order to indicate the beginning of a component installation, a play must be +added to the beginning of the main playbook for the component to set the phase +status to "In Progress". Additionally, a play must be added after the last play +for that component to set the phase status to "Complete". + +The following example shows the first play of the 'installer phase' loading the +`installer_checkpoint` role, as well as the `set_stats` task for setting +`installer_phase_initialize` to "In Progress". Various plays are run for the +phase/component and then a final play for setting `installer_hase_initialize` to +"Complete". + +```yaml +# common/openshift-cluster/std_include.yml +--- +- name: Initialization Checkpoint Start + hosts: localhost + connection: local + gather_facts: false + roles: + - installer_checkpoint + tasks: + - name: Set install initialization 'In Progress' + set_stats: + data: + installer_phase_initialize: "In Progress" + aggregate: false + +#... +# Various plays here +#... + +- name: Initialization Checkpoint End + hosts: localhost + connection: local + gather_facts: false + tasks: + - name: Set install initialization 'Complete' + set_stats: + data: + installer_phase_initialize: "Complete" + aggregate: false +``` + +Each phase or component of the installer will follow a similar pattern, with the +exception that the `installer_checkpoint` role does not need to be called since +it was already loaded by the play in `std_include.yml`. It is important to +place the 'In Progress' and 'Complete' plays as the first and last plays of the +phase or component. + +Examples +-------- + +Example display of a successful playbook run: + +``` +PLAY RECAP ********************************************************************* +master01.example.com : ok=158 changed=16 unreachable=0 failed=0 +node01.example.com : ok=469 changed=74 unreachable=0 failed=0 +node02.example.com : ok=157 changed=17 unreachable=0 failed=0 +localhost : ok=24 changed=0 unreachable=0 failed=0 + + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : Complete +Master Additional Install : Complete +Node Install : Complete +GlusterFS Install : Not Started +Hosted Install : Complete +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started +``` + +Example display if a failure occurs during execution: + +``` +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : In Progress + This phase can be restarted by running: playbooks/byo/openshift-master/config.yml +Master Additional Install : Not Started +Node Install : Not Started +GlusterFS Install : Not Started +Hosted Install : Not Started +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started +``` + +[set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html +[openshift_cluster_config]: https://github.com/openshift/openshift-ansible/blob/master/playbooks/common/openshift-cluster/config.yml diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py new file mode 100644 index 000000000..033240e62 --- /dev/null +++ 
b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -0,0 +1,182 @@ +"""Ansible callback plugin to print a summary completion status of installation +phases. +""" +from ansible.plugins.callback import CallbackBase +from ansible import constants as C + +DOCUMENTATION = ''' + +''' + +EXAMPLES = ''' +--------------------------------------------- +Example display of a successful playbook run: + +PLAY RECAP ********************************************************************* +master01.example.com : ok=158 changed=16 unreachable=0 failed=0 +node01.example.com : ok=469 changed=74 unreachable=0 failed=0 +node02.example.com : ok=157 changed=17 unreachable=0 failed=0 +localhost : ok=24 changed=0 unreachable=0 failed=0 + + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : Complete +Master Additional Install : Complete +Node Install : Complete +GlusterFS Install : Not Started +Hosted Install : Complete +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started + +----------------------------------------------------- +Example display if a failure occurs during execution: + +INSTALLER STATUS *************************************************************** +Initialization : Complete +etcd Install : Complete +NFS Install : Not Started +Load balancer Install : Not Started +Master Install : In Progress + This phase can be restarted by running: playbooks/byo/openshift-master/config.yml +Master Additional Install : Not Started +Node Install : Not Started +GlusterFS Install : Not Started +Hosted Install : Not Started +Metrics Install : Not Started +Logging Install : Not Started +Service Catalog Install : Not Started + +''' + + +class CallbackModule(CallbackBase): + """This callback summarizes installation phase status.""" + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'installer_checkpoint' + CALLBACK_NEEDS_WHITELIST = False + + def __init__(self): + super(CallbackModule, self).__init__() + + def v2_playbook_on_stats(self, stats): + + # Set the order of the installer phases + installer_phases = [ + 'installer_phase_initialize', + 'installer_phase_etcd', + 'installer_phase_nfs', + 'installer_phase_loadbalancer', + 'installer_phase_master', + 'installer_phase_master_additional', + 'installer_phase_node', + 'installer_phase_glusterfs', + 'installer_phase_hosted', + 'installer_phase_metrics', + 'installer_phase_logging', + 'installer_phase_servicecatalog', + ] + + # Define the attributes of the installer phases + phase_attributes = { + 'installer_phase_initialize': { + 'title': 'Initialization', + 'playbook': '' + }, + 'installer_phase_etcd': { + 'title': 'etcd Install', + 'playbook': 'playbooks/byo/openshift-etcd/config.yml' + }, + 'installer_phase_nfs': { + 'title': 'NFS Install', + 'playbook': 'playbooks/byo/openshift-nfs/config.yml' + }, + 'installer_phase_loadbalancer': { + 'title': 'Load balancer Install', + 'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml' + }, + 'installer_phase_master': { + 'title': 'Master Install', + 'playbook': 'playbooks/byo/openshift-master/config.yml' + }, + 'installer_phase_master_additional': { + 'title': 'Master Additional Install', + 'playbook': 'playbooks/byo/openshift-master/additional_config.yml' + }, + 'installer_phase_node': { + 'title': 'Node Install', + 'playbook': 'playbooks/byo/openshift-node/config.yml' + 
}, + 'installer_phase_glusterfs': { + 'title': 'GlusterFS Install', + 'playbook': 'playbooks/byo/openshift-glusterfs/config.yml' + }, + 'installer_phase_hosted': { + 'title': 'Hosted Install', + 'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml' + }, + 'installer_phase_metrics': { + 'title': 'Metrics Install', + 'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml' + }, + 'installer_phase_logging': { + 'title': 'Logging Install', + 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml' + }, + 'installer_phase_servicecatalog': { + 'title': 'Service Catalog Install', + 'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml' + }, + } + + # Find the longest phase title + max_column = 0 + for phase in phase_attributes: + max_column = max(max_column, len(phase_attributes[phase]['title'])) + + if '_run' in stats.custom: + self._display.banner('INSTALLER STATUS') + for phase in installer_phases: + phase_title = phase_attributes[phase]['title'] + padding = max_column - len(phase_title) + 2 + if phase in stats.custom['_run']: + phase_status = stats.custom['_run'][phase] + self._display.display( + '{}{}: {}'.format(phase_title, ' ' * padding, phase_status), + color=self.phase_color(phase_status)) + if phase_status == 'In Progress' and phase != 'installer_phase_initialize': + self._display.display( + '\tThis phase can be restarted by running: {}'.format( + phase_attributes[phase]['playbook'])) + else: + # Phase was not found in custom stats + self._display.display( + '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'), + color=C.COLOR_SKIP) + + self._display.display("", screen_only=True) + + def phase_color(self, status): + """ Return color code for installer phase""" + valid_status = [ + 'In Progress', + 'Complete', + ] + + if status not in valid_status: + self._display.warning('Invalid phase status defined: {}'.format(status)) + + if status == 'Complete': + phase_color = C.COLOR_OK + elif status == 'In Progress': + phase_color = C.COLOR_ERROR + else: + phase_color = C.COLOR_WARN + + return phase_color diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 1e6eb2386..05b2763d5 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -1421,7 +1421,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index 8c6a81cc8..d1dc4caf8 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -1399,7 +1399,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 4a7847e88..152f270ab 100644 --- 
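The callback above only renders whatever phase keys it finds under stats.custom['_run']; the keys themselves are expected to be set elsewhere (outside this diff) through Ansible's custom stats. A minimal sketch, using a hypothetical stats payload, of how the display logic resolves one phase line:

# Minimal sketch of the rendering path in v2_playbook_on_stats, fed with a
# hypothetical custom-stats payload; not the playbook code that sets it.
run_stats = {'installer_phase_master': 'In Progress'}      # hypothetical data
titles = {'installer_phase_master': 'Master Install'}
max_column = max(len(title) for title in titles.values())
for phase, status in run_stats.items():
    title = titles[phase]
    padding = max_column - len(title) + 2
    print('{}{}: {}'.format(title, ' ' * padding, status))
# -> Master Install  : In Progress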
a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index b8af5cad9..3082f5890 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 3364f8de3..1ceaf5d0d 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index c64d7ffd2..0771aa5a5 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -1511,7 +1511,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1886,13 +1886,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -1909,6 +1911,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 492494bda..146f71f68 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1536,7 +1536,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or 
isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -2230,13 +2230,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -2253,6 +2255,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index b412ca8af..9761b4b4e 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -1385,7 +1385,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 8bbc22c49..047edffbb 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -1391,7 +1391,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index ad17051cb..0b6a8436b 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -1435,7 +1435,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 74a84ac89..1f52fba40 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -1402,7 +1402,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index eea1516ae..1b63a6c13 100644 --- a/roles/lib_openshift/library/oc_group.py +++ 
b/roles/lib_openshift/library/oc_group.py @@ -1375,7 +1375,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index dc33d3b8a..94b08d9ce 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -1394,7 +1394,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 88fd9554d..ad837fdb5 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -1411,7 +1411,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 8408f9ebc..892546e56 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -1414,7 +1414,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index d1be0b534..38df585f0 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -1346,7 +1346,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 9a281e6cd..70632f86d 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -1403,7 +1403,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in 
sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index b503c330b..4eee748d7 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -1400,7 +1400,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 7a9e3bf89..2e73a7645 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 875e473ad..e003770d8 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -90,6 +90,12 @@ options: required: false default: str aliases: [] + labels: + description: + - The labels to apply on the route + required: false + default: None + aliases: [] tls_termination: description: - The options for termination. e.g. reencrypt @@ -1445,7 +1451,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1469,6 +1475,7 @@ class RouteConfig(object): sname, namespace, kubeconfig, + labels=None, destcacert=None, cacert=None, cert=None, @@ -1483,6 +1490,7 @@ class RouteConfig(object): self.kubeconfig = kubeconfig self.name = sname self.namespace = namespace + self.labels = labels self.host = host self.tls_termination = tls_termination self.destcacert = destcacert @@ -1508,6 +1516,8 @@ class RouteConfig(object): self.data['metadata'] = {} self.data['metadata']['name'] = self.name self.data['metadata']['namespace'] = self.namespace + if self.labels: + self.data['metadata']['labels'] = self.labels self.data['spec'] = {} self.data['spec']['host'] = self.host @@ -1715,6 +1725,7 @@ class OCRoute(OpenShiftCLI): rconfig = RouteConfig(params['name'], params['namespace'], params['kubeconfig'], + params['labels'], files['destcacert']['value'], files['cacert']['value'], files['cert']['value'], @@ -1819,6 +1830,7 @@ def main(): state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), + labels=dict(default=None, type='dict'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), tls_termination=dict(default=None, type='str'), diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index ec3635753..c142f1f43 100644 --- 
a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -1389,7 +1389,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index c010607e8..0614f359d 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -90,6 +90,12 @@ options: required: false default: default aliases: [] + annotations: + description: + - Annotations to apply to the object + required: false + default: None + aliases: [] files: description: - A list of files provided for secrets @@ -1441,7 +1447,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: @@ -1464,13 +1470,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -1487,6 +1495,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): @@ -1698,8 +1708,7 @@ class OCSecret(OpenShiftCLI): elif params['contents']: files = Utils.create_tmp_files_from_contents(params['contents']) else: - return {'failed': True, - 'msg': 'Either specify files or contents.'} + files = [{'name': 'null', 'path': os.devnull}] ######## # Create @@ -1783,6 +1792,7 @@ def main(): debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), name=dict(default=None, type='str'), + annotations=dict(default=None, type='dict'), type=dict(default=None, type='str'), files=dict(default=None, type='list'), delete_after=dict(default=False, type='bool'), diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index e83a6e26d..3e8aea4f1 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -1448,7 +1448,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 0d46bbf96..646a39224 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -1387,7 +1387,7 @@ class 
OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 662d77ec1..99a8e8f3d 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index 574f109e4..e88f3ae8d 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -1405,7 +1405,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index e430546ee..7bbe38819 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -1447,7 +1447,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index a12620968..63adbd6ac 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -1359,7 +1359,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 134b2ad19..3c07f8d4b 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -1436,7 +1436,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: 
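The recurring one-line change above (data['value'] is not None in place of the bare truthiness test) is what lets falsy-but-meaningful option values such as an empty string or an empty dict reach the generated oc command line; only None is skipped now. A minimal sketch contrasting the two conditions; the wrapper functions are illustrative, the boolean expressions are the ones from the diff:

# Contrast of the old and new include tests used by OpenShiftCLIConfig.
def included_old(value, include=True):
    return bool(include and (value or isinstance(value, int)))

def included_new(value, include=True):
    return bool(include and (value is not None or isinstance(value, int)))

for value in ('', [], {}, False, 0, None, 'edge'):
    print(repr(value), included_old(value), included_new(value))
# '' / [] / {} flip from excluded to included; ints and bools (bool is an
# int subclass) were already included; None stays excluded in both versions.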
diff --git a/roles/lib_openshift/src/ansible/oc_route.py b/roles/lib_openshift/src/ansible/oc_route.py index f2f5c5095..969cf8bcd 100644 --- a/roles/lib_openshift/src/ansible/oc_route.py +++ b/roles/lib_openshift/src/ansible/oc_route.py @@ -13,6 +13,7 @@ def main(): state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), + labels=dict(default=None, type='dict'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), tls_termination=dict(default=None, type='str'), diff --git a/roles/lib_openshift/src/ansible/oc_secret.py b/roles/lib_openshift/src/ansible/oc_secret.py index faa7c1772..ee2827e69 100644 --- a/roles/lib_openshift/src/ansible/oc_secret.py +++ b/roles/lib_openshift/src/ansible/oc_secret.py @@ -15,6 +15,7 @@ def main(): debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), name=dict(default=None, type='str'), + annotations=dict(default=None, type='dict'), type=dict(default=None, type='str'), files=dict(default=None, type='list'), delete_after=dict(default=False, type='bool'), diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py index 3a1bd732f..dc2f7977b 100644 --- a/roles/lib_openshift/src/class/oc_route.py +++ b/roles/lib_openshift/src/class/oc_route.py @@ -118,6 +118,7 @@ class OCRoute(OpenShiftCLI): rconfig = RouteConfig(params['name'], params['namespace'], params['kubeconfig'], + params['labels'], files['destcacert']['value'], files['cacert']['value'], files['cert']['value'], diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py index 4ee6443e9..5322d6241 100644 --- a/roles/lib_openshift/src/class/oc_secret.py +++ b/roles/lib_openshift/src/class/oc_secret.py @@ -142,8 +142,7 @@ class OCSecret(OpenShiftCLI): elif params['contents']: files = Utils.create_tmp_files_from_contents(params['contents']) else: - return {'failed': True, - 'msg': 'Either specify files or contents.'} + files = [{'name': 'null', 'path': os.devnull}] ######## # Create diff --git a/roles/lib_openshift/src/doc/route b/roles/lib_openshift/src/doc/route index a12999c9e..f0d38ab5f 100644 --- a/roles/lib_openshift/src/doc/route +++ b/roles/lib_openshift/src/doc/route @@ -39,6 +39,12 @@ options: required: false default: str aliases: [] + labels: + description: + - The labels to apply on the route + required: false + default: None + aliases: [] tls_termination: description: - The options for termination. e.g. 
reencrypt diff --git a/roles/lib_openshift/src/doc/secret b/roles/lib_openshift/src/doc/secret index 76b147f6f..a27f90f38 100644 --- a/roles/lib_openshift/src/doc/secret +++ b/roles/lib_openshift/src/doc/secret @@ -39,6 +39,12 @@ options: required: false default: default aliases: [] + annotations: + description: + - Annotations to apply to the object + required: false + default: None + aliases: [] files: description: - A list of files provided for secrets diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 5a307cdb3..1fb32164e 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -597,7 +597,7 @@ class OpenShiftCLIConfig(object): for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ - and (data['value'] or isinstance(data['value'], int)): + and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: diff --git a/roles/lib_openshift/src/lib/route.py b/roles/lib_openshift/src/lib/route.py index 3b54a24fb..b106866cb 100644 --- a/roles/lib_openshift/src/lib/route.py +++ b/roles/lib_openshift/src/lib/route.py @@ -11,6 +11,7 @@ class RouteConfig(object): sname, namespace, kubeconfig, + labels=None, destcacert=None, cacert=None, cert=None, @@ -25,6 +26,7 @@ class RouteConfig(object): self.kubeconfig = kubeconfig self.name = sname self.namespace = namespace + self.labels = labels self.host = host self.tls_termination = tls_termination self.destcacert = destcacert @@ -50,6 +52,8 @@ class RouteConfig(object): self.data['metadata'] = {} self.data['metadata']['name'] = self.name self.data['metadata']['namespace'] = self.namespace + if self.labels: + self.data['metadata']['labels'] = self.labels self.data['spec'] = {} self.data['spec']['host'] = self.host diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py index a1c202442..ad4b6aa36 100644 --- a/roles/lib_openshift/src/lib/secret.py +++ b/roles/lib_openshift/src/lib/secret.py @@ -10,13 +10,15 @@ class SecretConfig(object): namespace, kubeconfig, secrets=None, - stype=None): + stype=None, + annotations=None): ''' constructor for handling secret options ''' self.kubeconfig = kubeconfig self.name = sname self.type = stype self.namespace = namespace self.secrets = secrets + self.annotations = annotations self.data = {} self.create_dict() @@ -33,6 +35,8 @@ class SecretConfig(object): if self.secrets: for key, value in self.secrets.items(): self.data['data'][key] = value + if self.annotations: + self.data['metadata']['annotations'] = self.annotations # pylint: disable=too-many-instance-attributes class Secret(Yedit): diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py index f350bd25d..f350bd25d 100644 --- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py +++ b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py index afdb5e4dc..5699f123b 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_route.py +++ b/roles/lib_openshift/src/test/unit/test_oc_route.py @@ -39,6 +39,7 @@ class OCRouteTest(unittest.TestCase): 'debug': False, 'name': 'test', 'namespace': 'default', + 'labels': {'route': 'route'}, 'tls_termination': 
'passthrough', 'dest_cacert_path': None, 'cacert_path': None, @@ -64,7 +65,10 @@ class OCRouteTest(unittest.TestCase): "selfLink": "/oapi/v1/namespaces/default/routes/test", "uid": "1b127c67-ecd9-11e6-96eb-0e0d9bdacd26", "resourceVersion": "439182", - "creationTimestamp": "2017-02-07T01:59:48Z" + "creationTimestamp": "2017-02-07T01:59:48Z", + "labels": { + "route": "route" + } }, "spec": { "host": "test.example", @@ -141,6 +145,7 @@ class OCRouteTest(unittest.TestCase): 'debug': False, 'name': 'test', 'namespace': 'default', + 'labels': {'route': 'route'}, 'tls_termination': 'edge', 'dest_cacert_path': None, 'cacert_path': None, @@ -166,7 +171,8 @@ class OCRouteTest(unittest.TestCase): "namespace": "default", "resourceVersion": "517745", "selfLink": "/oapi/v1/namespaces/default/routes/test", - "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a" + "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a", + "labels": {"route": "route"} }, "spec": { "host": "test.openshift.com", @@ -250,6 +256,7 @@ metadata: self.assertTrue(results['changed']) self.assertEqual(results['state'], 'present') self.assertEqual(results['results']['results'][0]['metadata']['name'], 'test') + self.assertEqual(results['results']['results'][0]['metadata']['labels']['route'], 'route') # Making sure our mock was called as we expected mock_cmd.assert_has_calls([ diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 4e7f54f79..94c0f4472 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -13,10 +13,10 @@ openshift_aws_wait_for_ssh: True openshift_aws_clusterid: default openshift_aws_region: us-east-1 openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" +openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" openshift_aws_iam_cert_path: '' -openshift_aws_iam_cert_chain_path: '' openshift_aws_iam_cert_key_path: '' openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}" @@ -143,6 +143,11 @@ openshift_aws_elb_instance_filter: "tag:host-type": "{{ openshift_aws_node_group_type }}" instance-state-name: running +openshift_aws_launch_config_security_groups: +- "{{ openshift_aws_clusterid }}" # default sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + openshift_aws_node_security_groups: default: name: "{{ openshift_aws_clusterid }}" diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py index 06e1f9602..06e1f9602 100644 --- a/roles/openshift_aws/filter_plugins/filters.py +++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml index 8d4e5ac43..48555e5da 100644 --- a/roles/openshift_aws/tasks/build_ami.yml +++ b/roles/openshift_aws/tasks/build_ami.yml @@ -31,7 +31,7 @@ assign_public_ip: yes region: "{{ openshift_aws_region }}" key_name: "{{ openshift_aws_ssh_key_name }}" - group: "{{ openshift_aws_clusterid }}" + group: "{{ openshift_aws_build_ami_group }}" instance_type: m4.xlarge vpc_subnet_id: "{{ subnetout.subnets[0].id }}" image: "{{ openshift_aws_base_ami }}" diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml index a1fdd66fc..7bc3184df 100644 --- a/roles/openshift_aws/tasks/elb.yml +++ 
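Both of the new optional parameters added above (labels on oc_route, annotations on oc_secret) land in the object's metadata the same way. A minimal sketch of the resulting structure; the helper function and the annotation key are hypothetical, while the if-guards mirror the create_dict() additions shown in the diff:

# Hypothetical helper mirroring how the updated RouteConfig/SecretConfig
# attach the new optional fields; only non-empty dicts reach metadata.
def build_metadata(name, namespace, labels=None, annotations=None):
    metadata = {'name': name, 'namespace': namespace}
    if labels:            # oc_route: labels=dict(default=None, type='dict')
        metadata['labels'] = labels
    if annotations:       # oc_secret: annotations=dict(default=None, type='dict')
        metadata['annotations'] = annotations
    return metadata

print(build_metadata('test', 'default',
                     labels={'route': 'route'},                # as in the unit test
                     annotations={'example.io/owner': 'asb'})) # hypothetical key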
b/roles/openshift_aws/tasks/elb.yml @@ -29,9 +29,9 @@ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type else openshift_aws_elb_listeners }}" -- name: "Create ELB {{ openshift_aws_elb_name }}" +- name: "Create ELB {{ l_openshift_aws_elb_name }}" ec2_elb_lb: - name: "{{ openshift_aws_elb_name }}" + name: "{{ l_openshift_aws_elb_name }}" state: present security_group_names: "{{ openshift_aws_elb_security_groups }}" idle_timeout: "{{ openshift_aws_elb_idle_timout }}" @@ -49,10 +49,10 @@ # It is necessary to ignore_errors here because the instances are not in 'ready' # state when first added to ELB -- name: "Add instances to ELB {{ openshift_aws_elb_name }}" +- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}" ec2_elb: instance_id: "{{ item.id }}" - ec2_elbs: "{{ openshift_aws_elb_name }}" + ec2_elbs: "{{ l_openshift_aws_elb_name }}" state: present region: "{{ openshift_aws_region }}" wait: False diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml index cd9772a25..f74a62b8b 100644 --- a/roles/openshift_aws/tasks/iam_cert.yml +++ b/roles/openshift_aws/tasks/iam_cert.yml @@ -11,17 +11,23 @@ - "'failed' in elb_cert_chain" - elb_cert_chain.failed - "'msg' in elb_cert_chain" - - "'already exists and has a different certificate body' in elb_cert_chain.msg" - - "'BotoServerError' in elb_cert_chain.msg" + - "'already exists and has a different certificate body' in elb_cert_chain.msg or 'BotoServerError' in elb_cert_chain.msg or 'Traceback' in elb_cert_chain.msg.module_stderr" when: - openshift_aws_create_iam_cert | bool - openshift_aws_iam_cert_path != '' - openshift_aws_iam_cert_key_path != '' - openshift_aws_elb_cert_arn == '' +- debug: msg="{{ elb_cert_chain }}" + - name: set_fact openshift_aws_elb_cert_arn set_fact: openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}" + when: + - openshift_aws_create_iam_cert | bool + - openshift_aws_iam_cert_path != '' + - openshift_aws_iam_cert_key_path != '' + - openshift_aws_elb_cert_arn == '' - name: wait for cert to propagate pause: diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index 65c5a6cc0..e6be9969c 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -4,13 +4,18 @@ when: - openshift_aws_ami is undefined +- name: query vpc + ec2_vpc_net_facts: + region: "{{ openshift_aws_region }}" + filters: + 'tag:Name': "{{ openshift_aws_vpc_name }}" + register: vpcout + - name: fetch the security groups for launch config ec2_group_facts: filters: - group-name: - - "{{ openshift_aws_clusterid }}" # default sg - - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg - - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s + group-name: "{{ openshift_aws_launch_config_security_groups }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" region: "{{ openshift_aws_region }}" register: ec2sgs @@ -21,7 +26,7 @@ region: "{{ openshift_aws_region }}" image_id: "{{ openshift_aws_ami }}" instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}" - security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}" + security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" user_data: |- #cloud-config {% if openshift_aws_node_group_type != 'master' %} diff --git 
a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index 189caeaee..a2920b744 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -34,14 +34,14 @@ include: elb.yml vars: openshift_aws_elb_direction: internal - openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal" + l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal" openshift_aws_elb_scheme: internal - name: create our master external load balancers include: elb.yml vars: openshift_aws_elb_direction: external - openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external" + l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external" openshift_aws_elb_scheme: internet-facing - name: wait for ssh to become available diff --git a/roles/openshift_ca/defaults/main.yml b/roles/openshift_ca/defaults/main.yml index ecfcc88b3..742b15df4 100644 --- a/roles/openshift_ca/defaults/main.yml +++ b/roles/openshift_ca/defaults/main.yml @@ -1,3 +1,11 @@ --- openshift_ca_cert_expire_days: 1825 openshift_master_cert_expire_days: 730 + +openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" +openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" +openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" +openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" +openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig" + +openshift_version: "{{ openshift_pkg_version | default('') }}" diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml index dfbdf0cc7..f8b784a63 100644 --- a/roles/openshift_ca/meta/main.yml +++ b/roles/openshift_ca/meta/main.yml @@ -14,4 +14,3 @@ galaxy_info: - system dependencies: - role: openshift_cli -- role: openshift_named_certificates diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml index d04c1766d..4d80bf921 100644 --- a/roles/openshift_ca/vars/main.yml +++ b/roles/openshift_ca/vars/main.yml @@ -1,9 +1,2 @@ --- -openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" -openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" -openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" -openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" -openshift_version: "{{ openshift_pkg_version | default('') }}" - -openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig" loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}" diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md index 198163127..57e732f37 100644 --- a/roles/openshift_default_storage_class/README.md +++ b/roles/openshift_default_storage_class/README.md @@ -1,7 +1,7 @@ openshift_master_storage_class ========= -A role that deploys configuratons for Openshift StorageClass +A role that deploys configurations for Openshift StorageClass Requirements ------------ diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index 334150f63..5a3e50678 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -6,9 +6,6 @@ with_items: - role: docker local_facts: - additional_registries: "{{ openshift_docker_additional_registries | default(None) }}" - blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}" - insecure_registries: 
"{{ openshift_docker_insecure_registries | default(None) }}" selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}" log_driver: "{{ openshift_docker_log_driver | default(None) }}" log_options: "{{ openshift_docker_log_options | default(None) }}" @@ -23,12 +20,6 @@ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - set_fact: - docker_additional_registries: "{{ openshift.docker.additional_registries - | default(omit) }}" - docker_blocked_registries: "{{ openshift.docker.blocked_registries - | default(omit) }}" - docker_insecure_registries: "{{ openshift.docker.insecure_registries - | default(omit) }}" docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}" docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}" docker_log_options: "{{ openshift.docker.log_options | default(omit) }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 1c2c91a5a..215ff4b72 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -55,9 +55,6 @@ def migrate_docker_facts(facts): """ Apply migrations for docker facts """ params = { 'common': ( - 'additional_registries', - 'insecure_registries', - 'blocked_registries', 'options' ), 'node': ( @@ -768,14 +765,6 @@ def set_deployment_facts_if_unset(facts): service_type = 'origin' facts['common']['service_type'] = service_type - if 'docker' in facts: - deployment_type = facts['common']['deployment_type'] - if deployment_type == 'openshift-enterprise': - addtl_regs = facts['docker'].get('additional_registries', []) - ent_reg = 'registry.access.redhat.com' - if ent_reg not in addtl_regs: - facts['docker']['additional_registries'] = addtl_regs + [ent_reg] - for role in ('master', 'node'): if role in facts: deployment_type = facts['common']['deployment_type'] @@ -1680,7 +1669,9 @@ def set_container_facts_if_unset(facts): facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') # If openshift_docker_use_system_container is set and is True .... if 'use_system_container' in list(facts['docker'].keys()): - if facts['docker']['use_system_container']: + # use safe_get_bool as the inventory variable may not be a + # valid boolean on it's own. + if safe_get_bool(facts['docker']['use_system_container']): # ... 
set the service name to container-engine facts['docker']['service_name'] = 'container-engine' @@ -2248,19 +2239,6 @@ class OpenShiftFacts(object): protected_facts_to_overwrite) if 'docker' in new_local_facts: - # remove duplicate and empty strings from registry lists, preserving order - for cat in ['additional', 'blocked', 'insecure']: - key = '{0}_registries'.format(cat) - if key in new_local_facts['docker']: - val = new_local_facts['docker'][key] - if isinstance(val, string_types): - val = [x.strip() for x in val.split(',')] - seen = set() - new_local_facts['docker'][key] = list() - for registry in val: - if registry not in seen and registry != '': - seen.add(registry) - new_local_facts['docker'][key].append(registry) # Convert legacy log_options comma sep string to a list if present: if 'log_options' in new_local_facts['docker'] and \ isinstance(new_local_facts['docker']['log_options'], string_types): diff --git a/roles/openshift_gcp/defaults/main.yml b/roles/openshift_gcp/defaults/main.yml new file mode 100644 index 000000000..18fc453b2 --- /dev/null +++ b/roles/openshift_gcp/defaults/main.yml @@ -0,0 +1,58 @@ +--- +openshift_gcp_prefix: '' + +openshift_gcp_create_network: True +openshift_gcp_create_registry_bucket: True +openshift_gcp_kubernetes_cluster_status: owned # or shared +openshift_gcp_node_group_type: master + +openshift_gcp_ssh_private_key: '' + +openshift_gcp_project: '' +openshift_gcp_clusterid: default +openshift_gcp_region: us-central1 +openshift_gcp_zone: us-central1-a + +openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network" + +openshift_gcp_iam_service_account: '' +openshift_gcp_iam_service_account_keyfile: '' + +openshift_gcp_master_lb_timeout: 2m + +openshift_gcp_infra_network_instance_group: ig-i + +openshift_gcp_image: 'rhel-7' +openshift_gcp_base_image: rhel-7 + +openshift_gcp_registry_bucket_keyfile: '' +openshift_gcp_registry_bucket_name: "{{ openshift_gcp_prefix }}-docker-registry" + +openshift_gcp_node_group_config: + - name: master + suffix: m + tags: ocp-master + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 1 + - name: infra + suffix: i + tags: ocp-infra-node ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 1 + - name: node + suffix: n + tags: ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 3 + - name: node-flex + suffix: nf + tags: ocp-node + machine_type: n1-standard-2 + boot_disk_size: 150 + scale: 0 + +openshift_gcp_startup_script_file: '' +openshift_gcp_user_data_file: '' diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh index eacf84b4d..a7475aaf5 100644 --- a/roles/openshift_gcp/templates/dns.j2.sh +++ b/roles/openshift_gcp/templates/dns.j2.sh @@ -2,12 +2,12 @@ set -euo pipefail -dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" # Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist -if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null +if ! 
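The safe_get_bool change above addresses string-typed inventory values: any non-empty string, including "false", is truthy in Python, so the old bare if test could wrongly switch the Docker service to container-engine. A minimal stand-in (not the helper's actual implementation) showing the intent:

# Why the explicit conversion matters: inventory booleans often arrive as
# strings, and bool("false") is True.
val = "false"
print(bool(val))                  # True -> the old check would fire

def safe_get_bool_sketch(value):  # stand-in for the openshift_facts helper
    return str(value).lower() in ('yes', 'y', 'true', '1')

print(safe_get_bool_sketch(val))  # False -> system container stays disabled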
gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null fi # Always output the expected nameservers as a comma delimited list -gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ',' +gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ',' diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh index e68e9683f..d72a11de1 100644 --- a/roles/openshift_gcp/templates/provision.j2.sh +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -2,36 +2,38 @@ set -euo pipefail -# Create SSH key for GCE -if [ ! -f "{{ gce_ssh_private_key }}" ]; then - ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N '' - ssh-add "{{ gce_ssh_private_key }}" || true -fi +if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then + # Create SSH key for GCE + if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then + ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N '' + ssh-add "{{ openshift_gcp_ssh_private_key }}" || true + fi -# Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there -pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub") -key_tmp_file='/tmp/ocp-gce-keys' -if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then - if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then - gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" + # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there + pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub") + key_tmp_file='/tmp/ocp-gce-keys' + if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then + if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then + gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file" + fi + echo -n 'cloud-user:' >> "$key_tmp_file" + cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file" + gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" + rm -f "$key_tmp_file" fi - echo -n 'cloud-user:' >> "$key_tmp_file" - cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file" - gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}" - rm -f "$key_tmp_file" fi metadata="" -if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then - if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then - echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)" +if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then + if [[ ! 
-f "{{ openshift_gcp_startup_script_file }}" ]]; then + echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)" exit 1 fi - metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}" + metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}" fi -if [[ -n "{{ provision_gce_user_data_file }}" ]]; then - if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then - echo "User data file missing at {{ provision_gce_user_data_file }}" +if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then + if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then + echo "User data file missing at {{ openshift_gcp_user_data_file }}" exit 1 fi if [[ -n "${metadata}" ]]; then @@ -39,14 +41,14 @@ if [[ -n "{{ provision_gce_user_data_file }}" ]]; then else metadata="--metadata-from-file=" fi - metadata+="user-data={{ provision_gce_user_data_file }}" + metadata+="user-data={{ openshift_gcp_user_data_file }}" fi # Select image or image family -image="{{ provision_gce_registered_image }}" -if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then - if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then - echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue'" +image="{{ openshift_gcp_image }}" +if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then + if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then + echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'" exit 1 fi image="family/${image}" @@ -54,19 +56,19 @@ fi ### PROVISION THE INFRASTRUCTURE ### -dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" # Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers -if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then +if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script" exit 1 fi # Create network -if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto" +if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto" else - echo "Network '{{ gce_network_name }}' already exists" + echo "Network '{{ openshift_gcp_network_name }}' already exists" fi # Firewall rules in a form: @@ -87,56 +89,56 @@ declare -A FW_RULES=( ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node" ) for rule in "${!FW_RULES[@]}"; do - ( if ! 
gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]} + ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]} else - echo "Firewall rule '{{ provision_prefix }}${rule}' already exists" + echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists" fi ) & done # Master IP -( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global else - echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists" fi ) & # Internal master IP -( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" else - echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists" fi ) & # Router IP -( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" +( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" else - echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists" + echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists" fi ) & -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # configure {{ node_group.name }} ( - if ! 
gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \ - --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \ - --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \ + if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \ + --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \ + --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ node_group.tags }}" \ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \ --image "${image}" ${metadata} else - echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists" + echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists" fi # Create instance group - if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \ - --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}" + if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \ + --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}" else - echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists" + echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists" fi ) & {% endfor %} @@ -147,36 +149,36 @@ for i in `jobs -p`; do wait $i; done # Configure the master external LB rules ( # Master health check -if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz" +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz" else - echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists" + echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists" fi -gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \ - --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}" +gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \ + --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}" # Master backend service -if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}" - gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}" +if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}" + gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}" else - echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists" + echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists" fi # Master tcp proxy target -if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend" +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend" else - echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists" + echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists" fi # Master forwarding rule -if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)') - gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target" +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target" else - echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists" + echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists" fi ) & @@ -184,25 +186,25 @@ fi # Configure the master internal LB rules ( # Internal master health check -if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz" +if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz" else - echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists" + echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists" fi # Internal master target pool -if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}" +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}" else - echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists" + echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists" fi # Internal master forwarding rule -if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') - gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool" +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool" else - echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists" + echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists" fi ) & @@ -210,25 +212,25 @@ fi # Configure the infra node rules ( # Router health check -if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz" +if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz" else - echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists" + echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists" fi # Router target pool -if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then - gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}" +if ! 
gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}" else - echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists" + echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists" fi # Router forwarding rule -if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') - gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool" +if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') + gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool" else - echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists" + echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists" fi ) & @@ -236,11 +238,11 @@ for i in `jobs -p`; do wait $i; done # set the target pools ( -if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then - gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}" +if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}" else - gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}" - gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}" + gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}" + gcloud --project "{{ 
openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}" fi ) & @@ -252,42 +254,42 @@ while true; do rm -f $dns # DNS record for master lb - if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)') + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)') if [[ ! -f $dns ]]; then - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" fi - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP" else echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists" fi # DNS record for internal master lb - if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') if [[ ! -f $dns ]]; then - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" fi - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP" else echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists" fi # DNS record for router lb - if ! 
gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then - IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)') + if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then + IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)') if [[ ! -f $dns ]]; then - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" fi - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP" - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}." + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}." else echo "DNS record for '{{ wildcard_zone }}' already exists" fi # Commit all DNS changes, retrying if preconditions are not met if [[ -f $dns ]]; then - if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then + if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then rc=$? if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then continue @@ -301,17 +303,17 @@ done # Create bucket for registry ( -if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then - gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" +if ! 
gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then + gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}" else - echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists" + echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists" fi ) & # wait until all node groups are stable -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # wait for stable {{ node_group.name }} -( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) & +( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) & {% endfor %} diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh index 41ceab2b5..a1e0affec 100644 --- a/roles/openshift_gcp/templates/remove.j2.sh +++ b/roles/openshift_gcp/templates/remove.j2.sh @@ -18,8 +18,8 @@ function teardown_cmd() { if [[ -z "${found}" ]]; then flag=$((flag+1)) fi - if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then - gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag} + if gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then + gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag} fi } @@ -33,11 +33,11 @@ function teardown() { } # Preemptively spin down the instances -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # scale down {{ node_group.name }} ( # performs a delete and scale down as one operation to ensure maximum parallelism - if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then + if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' ); then exit 0 fi instances="${instances%?}" @@ -45,7 +45,7 @@ function teardown() { echo "warning: No instances in {{ node_group.name }}" 1>&2 exit 0 fi - if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then + if ! 
gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed delete-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --instances "${instances}"; then echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2 exit 0 fi @@ -54,15 +54,15 @@ function teardown() { # Bucket for registry ( -if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then - gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" +if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then + gsutil -m rm -r "gs://{{ openshift_gcp_registry_bucket_name }}" fi ) & # DNS ( -dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}" -if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then +dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" +if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then # Retry DNS changes until they succeed since this may be a shared resource while true; do dns="${TMPDIR:-/tmp}/dns.yaml" @@ -70,16 +70,16 @@ if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zon # export all dns records that match into a zone format, and turn each line into a set of args for # record-sets transaction. - gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}" + gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}" if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \ awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input" then rm -f "${dns}" - gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" - cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}" + gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}" + cat "${dns}.input" | xargs -L1 gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}" # Commit all DNS changes, retrying if preconditions are not met - if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then + if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then rc=$? 
if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then continue @@ -95,25 +95,25 @@ fi ( # Router network rules -teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}" -teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}" -teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks -teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}" +teardown "{{ openshift_gcp_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}router-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}router-network-lb-health-check" compute http-health-checks +teardown "{{ openshift_gcp_prefix }}router-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}" # Internal master network rules -teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}" -teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}" -teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks -teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}" +teardown "{{ openshift_gcp_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}master-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}" +teardown "{{ openshift_gcp_prefix }}master-network-lb-health-check" compute http-health-checks +teardown "{{ openshift_gcp_prefix }}master-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}" ) & ( # Master SSL network rules -teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global -teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies -teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global -teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global -teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-rule" compute forwarding-rules --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-target" compute target-tcp-proxies +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-backend" compute backend-services --global +teardown "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" compute health-checks ) & #Firewall rules @@ -130,10 +130,10 @@ declare -A FW_RULES=( ['infra-node-external']="" ) for rule in "${!FW_RULES[@]}"; do - ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then + ( if gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then # retry a few times because this call can be flaky for i in `seq 1 3`; do - if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then + if gcloud -q --project "{{ openshift_gcp_project }}" compute firewall-rules delete "{{ openshift_gcp_prefix }}$rule"; then break fi done @@ -142,15 +142,15 @@ done 
for i in `jobs -p`; do wait $i; done -{% for node_group in provision_gce_node_groups %} +{% for node_group in openshift_gcp_node_group_config %} # teardown {{ node_group.name }} - any load balancers referencing these groups must be removed ( - teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}" - teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates + teardown "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ openshift_gcp_zone }}" + teardown "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" compute instance-templates ) & {% endfor %} for i in `jobs -p`; do wait $i; done # Network -teardown "{{ gce_network_name }}" compute networks +teardown "{{ openshift_gcp_network_name }}" compute networks diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 98372d979..fa07c1dde 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -1,5 +1,6 @@ """Check that required Docker images are available.""" +from ansible.module_utils import six from openshift_checks import OpenShiftCheck from openshift_checks.mixins import DockerHostMixin @@ -153,7 +154,15 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): def known_docker_registries(self): """Build a list of docker registries available according to inventory vars.""" - regs = list(self.get_var("openshift.docker.additional_registries", default=[])) + regs = self.get_var("openshift_docker_additional_registries", default=[]) + # https://bugzilla.redhat.com/show_bug.cgi?id=1497274 + # if the result was a string type, place it into a list. We must do this + # as using list() on a string will split the string into its characters. 
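For context on the check above: once the string-to-list normalization is applied, `openshift_docker_additional_registries` may be supplied either as a single string or as a list. A minimal illustrative inventory fragment (the `registry.example.com` host is a placeholder, not a value from this patch):

    # Either form is accepted once a bare string is wrapped into a list:
    openshift_docker_additional_registries: registry.example.com
    # or, equivalently:
    # openshift_docker_additional_registries:
    #   - registry.example.com
    #   - registry.access.redhat.com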
+ if isinstance(regs, six.string_types): + regs = [regs] + else: + # Otherwise cast to a list as was done previously + regs = list(regs) deployment_type = self.get_var("openshift_deployment_type") if deployment_type == "origin" and "docker.io" not in regs: diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index 952fa9aa6..c523ffd5c 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -72,7 +72,7 @@ def test_all_images_available_remotely(task_vars, available_locally): return {'images': [], 'failed': available_locally} return {} - task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"] + task_vars['openshift_docker_additional_registries'] = ["docker.io", "registry.access.redhat.com"] task_vars['openshift_image_tag'] = 'v3.4' check = DockerImageAvailability(execute_module, task_vars) check._module_retry_interval = 0 @@ -90,7 +90,7 @@ def test_all_images_unavailable(task_vars): return {} # docker_image_facts failure - task_vars['openshift']['docker']['additional_registries'] = ["docker.io"] + task_vars['openshift_docker_additional_registries'] = ["docker.io"] task_vars['openshift_deployment_type'] = "openshift-enterprise" task_vars['openshift_image_tag'] = 'latest' check = DockerImageAvailability(execute_module, task_vars) @@ -121,9 +121,9 @@ def test_no_known_registries(): service_type='origin', is_containerized=False, is_atomic=False, - ), - docker=dict(additional_registries=["docker.io"]), + ) ), + openshift_docker_additional_registries=["docker.io"], openshift_deployment_type="openshift-enterprise", openshift_image_tag='latest', group_names=['nodes', 'masters'], @@ -154,7 +154,7 @@ def test_skopeo_update_failure(task_vars, message, extra_words): return {} - task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"] + task_vars['openshift_docker_additional_registries'] = ["unknown.io"] task_vars['openshift_deployment_type'] = "openshift-enterprise" check = DockerImageAvailability(execute_module, task_vars) check._module_retry_interval = 0 diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py index 7f41529ac..7f41529ac 100644 --- a/roles/openshift_hosted/filter_plugins/filters.py +++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 2aeecc943..2aceef9e4 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -52,9 +52,9 @@ certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}" - - # End Block - when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {} + when: + - openshift_hosted_router_create_certificate | bool + - openshift_hosted_router_certificate == {} - name: Create the router service account(s) oc_serviceaccount: diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 53d1a8bc7..47dc9171d 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -1,9 +1,11 @@ --- +# 
openshift_*_selector variables have been deprecated in favor of +# openshift_hosted_*_selector variables. - set_fact: - openshift_hosted_router_selector: "{{ openshift_hosted_infra_selector }}" + openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined - set_fact: - openshift_hosted_registry_selector: "{{ openshift_hosted_infra_selector }}" + openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined - name: Set hosted facts diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index de3d19858..829c78728 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -12,13 +12,13 @@ generation for Elasticsearch (it uses JKS) as well as openssl to sign certificat As part of the installation, it is recommended that you add the Fluentd node selector label to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels). -###Required vars: +### Required vars: - `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging. When `openshift_logging_install_logging` is set to `False` the `openshift_logging` role will uninstall Aggregated Logging. -###Optional vars: +### Optional vars: - `openshift_logging_purge_logging`: When `openshift_logging_install_logging` is set to 'False' to trigger uninstalation and `openshift_logging_purge_logging` is set to 'True', it will completely and irreversibly remove all logging persistent data including PVC. Defaults to 'False'. - `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'. - `openshift_logging_curator_image_prefix`: Setting the image prefix for Curator image. Defaults to `openshift_logging_image_prefix`. @@ -90,6 +90,12 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin - `openshift_logging_es_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. - `openshift_logging_es_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'. +- `openshift_logging_install_eventrouter`: Coupled with `openshift_logging_install_logging`. When both are 'True', eventrouter will be installed. When both are 'False', eventrouter will be uninstalled. +Other combinations will keep the eventrouter untouched. + +Detailed eventrouter configuration can be found in +- `roles/openshift_logging_eventrouter/README.md` + When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the same as above for their non-ops counterparts, but apply to the OPS cluster instance: - `openshift_logging_es_ops_host`: logging-es-ops @@ -163,7 +169,7 @@ Elasticsearch OPS too, if using an OPS cluster: send the raw logs to mux for processing. We do not currently recommend using this mode, and ansible will warn you about this. - `openshift_logging_mux_hostname`: Default is "mux." + - `openshift_master_default_subdomain`. This is the hostname *external*_ + `openshift_master_default_subdomain`. 
This is the hostname *external* clients will use to connect to mux, and will be used in the TLS server cert subject. - `openshift_logging_mux_port`: 24284 @@ -193,3 +199,26 @@ Elasticsearch OPS too, if using an OPS cluster: Defaults to 'logging-mux'. - `openshift_logging_mux_file_buffer_storage_group`: The storage group used for Mux. Defaults to '65534'. + +### remote syslog forwarding +- `openshift_logging_fluentd_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` +- `openshift_logging_fluentd_remote_syslog_host`: Required, hostname or IP of remote syslog server +- `openshift_logging_fluentd_remote_syslog_port`: Port of remote syslog server, defaults to `514` +- `openshift_logging_fluentd_remote_syslog_severity`: Syslog severity level, defaults to `debug` +- `openshift_logging_fluentd_remote_syslog_facility`: Syslog facility, defaults to `local0` +- `openshift_logging_fluentd_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) +- `openshift_logging_fluentd_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message +- `openshift_logging_fluentd_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` +- `openshift_logging_fluentd_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message + +The corresponding openshift\_logging\_mux\_* parameters are below. + +- `openshift_logging_mux_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false` +- `openshift_logging_mux_remote_syslog_host`: Required, hostname or IP of remote syslog server +- `openshift_logging_mux_remote_syslog_port`: Port of remote syslog server, defaults to `514` +- `openshift_logging_mux_remote_syslog_severity`: Syslog severity level, defaults to `debug` +- `openshift_logging_mux_remote_syslog_facility`: Syslog facility, defaults to `local0` +- `openshift_logging_mux_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty) +- `openshift_logging_mux_remote_syslog_tag_key`: If string specified, use this field from the record to set the key field on the syslog message +- `openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false` +- `openshift_logging_mux_remote_syslog_payload_key`: If string is specified, use this field from the record as the payload on the syslog message diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index db4262fed..0f1f659c6 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -8,7 +8,6 @@ openshift_logging_labels: {} openshift_logging_label_key: "" openshift_logging_label_value: "" openshift_logging_install_logging: False -openshift_logging_uninstall_logging: False openshift_logging_purge_logging: False openshift_logging_image_pull_secret: "" @@ -32,7 +31,7 @@ openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_memory_limit: 736Mi openshift_logging_kibana_proxy_debug: false openshift_logging_kibana_proxy_cpu_limit: null -openshift_logging_kibana_proxy_memory_limit: 96Mi +openshift_logging_kibana_proxy_memory_limit: 256Mi openshift_logging_kibana_replica_count: 1 openshift_logging_kibana_edge_term_policy: Redirect @@ -56,7 +55,7 @@ openshift_logging_kibana_ops_cpu_limit: null openshift_logging_kibana_ops_memory_limit: 736Mi 
openshift_logging_kibana_ops_proxy_debug: false openshift_logging_kibana_ops_proxy_cpu_limit: null -openshift_logging_kibana_ops_proxy_memory_limit: 96Mi +openshift_logging_kibana_ops_proxy_memory_limit: 256Mi openshift_logging_kibana_ops_replica_count: 1 #The absolute path on the control node to the cert file to use diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 45298e345..3040d15ca 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -105,3 +105,9 @@ - logging-elasticsearch - logging-fluentd - logging-mux + +## EventRouter +- include_role: + name: openshift_logging_eventrouter + when: + not openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index b9b408e4e..a3e653cb8 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -266,4 +266,12 @@ openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}" openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}" + +## EventRouter +- include_role: + name: openshift_logging_eventrouter + when: + openshift_logging_install_eventrouter | default(false) | bool + + - include: update_master_config.yaml diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 0da9771c7..15f6a23e6 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -36,7 +36,7 @@ - include: delete_logging.yaml when: - - openshift_logging_uninstall_logging | default(false) | bool + - not openshift_logging_install_logging | default(false) | bool - name: Cleaning up local temp dir local_action: file path="{{local_tmp.stdout}}" state=absent diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml index 49e8a18af..f60fa8d7d 100644 --- a/roles/openshift_logging/vars/openshift-enterprise.yml +++ b/roles/openshift_logging/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.6') }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.7') }}" diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md new file mode 100644 index 000000000..da313d68b --- /dev/null +++ b/roles/openshift_logging_eventrouter/README.md @@ -0,0 +1,20 @@ +Event router +------------ + +A pod forwarding kubernetes events to EFK aggregated logging stack. + +- **eventrouter** is deployed to logging project, has a service account and its own role to read events +- **eventrouter** watches kubernetes events, marshalls them to JSON and outputs to its sink, currently only various formatting to STDOUT +- **fluentd** picks them up and inserts to elasticsearch *.operations* index + +- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled. + +Configuration variables: + +- `openshift_logging_eventrouter_image_prefix`: The prefix for the eventrouter logging image. Defaults to `openshift_logging_image_prefix`. 
+- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'. +- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter, supported 'stdout' and 'glog'. Defaults to 'stdout'. +- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land. +- `openshift_logging_eventrouter_cpu_limit`: The amount of CPU to allocate to eventrouter. Defaults to '100m'. +- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'. +- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'. diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml new file mode 100644 index 000000000..34e33f75f --- /dev/null +++ b/roles/openshift_logging_eventrouter/defaults/main.yaml @@ -0,0 +1,9 @@ +--- +openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default('latest') }}" +openshift_logging_eventrouter_replicas: 1 +openshift_logging_eventrouter_sink: stdout +openshift_logging_eventrouter_nodeselector: "" +openshift_logging_eventrouter_cpu_limit: 100m +openshift_logging_eventrouter_memory_limit: 128Mi +openshift_logging_eventrouter_namespace: default diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml new file mode 100644 index 000000000..91708e54b --- /dev/null +++ b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml @@ -0,0 +1,103 @@ +# this openshift template should match (except nodeSelector) jinja2 template in +# ../templates/eventrouter-template.j2 +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: Always + resources: + limits: + memory: ${MEMORY} + cpu: ${CPU} + requires: + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml new file mode 100644 index 000000000..cf0abbde9 --- /dev/null +++ b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml @@ -0,0 +1,40 @@ +--- +# delete eventrouter +- name: Delete EventRouter service account + oc_serviceaccount: + state: absent + name: "aggregated-logging-eventrouter" + namespace: "{{ openshift_logging_eventrouter_namespace }}" + +- name: Delete event-reader cluster role + oc_clusterrole: + state: absent + name: event-reader + +- name: Unset privileged permissions for EventRouter + oc_adm_policy_user: + namespace: "{{ openshift_logging_eventrouter_namespace }}" + resource_kind: cluster-role + resource_name: event-reader + state: absent + user: "system:serviceaccount:{{ openshift_logging_eventrouter_namespace }}:aggregated-logging-eventrouter" + +- name: Delete EventRouter configmap + oc_configmap: + state: absent + name: logging-eventrouter + namespace: "{{ openshift_logging_eventrouter_namespace }}" + +- name: Delete EventRouter DC + oc_obj: + state: absent + name: logging-eventrouter + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: dc + +- name: Delete EventRouter Template + oc_obj: + state: absent + name: eventrouter-template + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: template diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml new file mode 100644 index 000000000..8df7435e2 --- /dev/null +++ 
b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml @@ -0,0 +1,59 @@ +--- +# initial checks +- assert: + msg: Invalid sink type "{{openshift_logging_eventrouter_sink}}", only one of "{{__eventrouter_sinks}}" allowed + that: openshift_logging_eventrouter_sink in __eventrouter_sinks + +# allow passing in a tempdir +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX + register: mktemp + changed_when: False + +- set_fact: + tempdir: "{{ mktemp.stdout }}" + +- name: Create templates subdirectory + file: + state: directory + path: "{{ tempdir }}/templates" + mode: 0755 + changed_when: False + +# create EventRouter deployment config +- name: Generate EventRouter template + template: + src: eventrouter-template.j2 + dest: "{{ tempdir }}/templates/eventrouter-template.yaml" + vars: + node_selector: "{{ openshift_logging_eventrouter_nodeselector | default({}) }}" + +- name: Create EventRouter template + oc_obj: + namespace: "{{ openshift_logging_eventrouter_namespace }}" + kind: template + name: eventrouter-template + state: present + files: + - "{{ tempdir }}/templates/eventrouter-template.yaml" + +- name: Process EventRouter template + oc_process: + state: present + template_name: eventrouter-template + namespace: "{{ openshift_logging_eventrouter_namespace }}" + params: + IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}" + REPLICAS: "{{ openshift_logging_eventrouter_replicas }}" + CPU: "{{ openshift_logging_eventrouter_cpu_limit }}" + MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}" + NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}" + SINK: "{{ openshift_logging_eventrouter_sink }}" + +## Placeholder for migration when necessary ## + +- name: Delete temp directory + file: + name: "{{ tempdir }}" + state: absent + changed_when: False diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml new file mode 100644 index 000000000..58e5a559f --- /dev/null +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -0,0 +1,6 @@ +--- +- include: "{{ role_path }}/tasks/install_eventrouter.yaml" + when: openshift_logging_install_eventrouter | default(false) | bool + +- include: "{{ role_path }}/tasks/delete_eventrouter.yaml" + when: not openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 new file mode 100644 index 000000000..9ff4c7e80 --- /dev/null +++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 @@ -0,0 +1,109 @@ +# this jinja2 template should always match (except nodeSelector) openshift template in +# ../files/eventrouter-template.yaml +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter +{% if node_selector is iterable and node_selector | length > 0 %} + nodeSelector: +{% for key, value in node_selector.iteritems() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: Always + resources: + limits: + memory: ${MEMORY} + cpu: ${CPU} + requires: + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/vars/main.yaml b/roles/openshift_logging_eventrouter/vars/main.yaml new file mode 100644 index 000000000..bdf561fe3 --- /dev/null +++ b/roles/openshift_logging_eventrouter/vars/main.yaml @@ -0,0 +1,2 @@ +--- +__eventrouter_sinks: ["glog", "stdout"] diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 index 46de94d60..6e07b403a 100644 --- a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 +++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 @@ -49,7 +49,9 @@ @include configs.d/openshift/filter-viaq-data-model.conf @include configs.d/openshift/filter-post-*.conf ## +</label> +<label @OUTPUT> ## matches @include configs.d/openshift/output-pre-*.conf @include configs.d/openshift/output-operations.conf diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index 1c0d1089f..b5f27b60d 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -118,6 +118,56 @@ spec: - name: "MUX_CLIENT_MODE" value: "{{ openshift_logging_mux_client_mode }}" {% endif %} +{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %} + - name: "TRANSFORM_EVENTS" + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog is 
defined and openshift_logging_fluentd_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_fluentd_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_fluentd_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_fluentd_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_fluentd_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}" +{% endif %} + volumes: - name: runlogjournal hostPath: diff --git a/roles/openshift_logging_mux/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf index aeaa705ee..bf61c9811 100644 --- a/roles/openshift_logging_mux/files/fluent.conf +++ b/roles/openshift_logging_mux/files/fluent.conf @@ -25,7 +25,9 @@ @include configs.d/openshift/filter-viaq-data-model.conf @include configs.d/openshift/filter-post-*.conf ## +</label> +<label @OUTPUT> ## matches @include configs.d/openshift/output-pre-*.conf @include configs.d/openshift/output-operations.conf diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2 index ff18d3270..4cc48139f 100644 --- a/roles/openshift_logging_mux/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/mux.j2 @@ -119,6 +119,52 @@ spec: resource: limits.memory - name: "FILE_BUFFER_LIMIT" value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}" + +{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_mux_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_mux_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_mux_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_mux_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ 
openshift_logging_mux_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_mux_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_mux_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_mux_remote_syslog_payload_key }}" +{% endif %} + volumes: - name: config configMap: diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 86fa57b50..2dcc56e3f 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -1,4 +1,4 @@ -OpenShift/Atomic Enterprise Master +OpenShift Master ================================== Master service installation diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml index 018186e86..300b2cbff 100644 --- a/roles/openshift_master_certificates/meta/main.yml +++ b/roles/openshift_master_certificates/meta/main.yml @@ -12,6 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_master_facts -- role: openshift_ca +dependencies: [] diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml index a80313505..d0dcdae4b 100644 --- a/roles/openshift_master_facts/defaults/main.yml +++ b/roles/openshift_master_facts/defaults/main.yml @@ -1,5 +1,5 @@ --- -openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}" +openshift_master_default_subdomain: "router.default.svc.cluster.local" openshift_master_admission_plugin_config: openshift.io/ImagePolicy: configuration: diff --git a/roles/openshift_master_facts/lookup_plugins/oo_option.py b/roles/openshift_master_facts/lookup_plugins/oo_option.py deleted file mode 120000 index 5ae43f8dd..000000000 --- a/roles/openshift_master_facts/lookup_plugins/oo_option.py +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins/oo_option.py
\ No newline at end of file diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index fa228af2a..a95570d38 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -1,5 +1,4 @@ --- - # Ensure the default sub-domain is set: - name: Migrate legacy osm_default_subdomain fact set_fact: diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index ed0182ba8..8da74430f 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -1,7 +1,6 @@ --- openshift_metrics_start_cluster: True openshift_metrics_install_metrics: False -openshift_metrics_uninstall_metrics: False openshift_metrics_startup_timeout: 500 openshift_metrics_hawkular_replicas: 1 @@ -61,3 +60,6 @@ openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_mod openshift_metrics_hawkular_user_write_access: False openshift_metrics_heapster_allowed_users: system:master-proxy + +openshift_metrics_cassandra_enable_prometheus_endpoint: True +openshift_metrics_hawkular_enable_prometheus_endpoint: True diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 0461039fc..10509fc1e 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -45,11 +45,11 @@ - include: install_metrics.yaml when: - - openshift_metrics_install_metrics | default(false) | bool + - openshift_metrics_install_metrics | bool - include: uninstall_metrics.yaml when: - - openshift_metrics_uninstall_metrics | default(false) | bool + - not openshift_metrics_install_metrics | bool - include: uninstall_hosa.yaml when: not openshift_metrics_install_hawkular_agent | bool diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 index fc82f49b1..6f341bcfb 100644 --- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 @@ -56,6 +56,8 @@ spec: value: "/cassandra_data" - name: JVM_OPTS value: "-Dcassandra.commitlog.ignorereplayerrors=true" + - name: ENABLE_PROMETHEUS_ENDPOINT + value: "{{ openshift_metrics_cassandra_enable_prometheus_endpoint }}" - name: TRUSTSTORE_NODES_AUTHORITIES value: "/hawkular-cassandra-certs/tls.peer.truststore.crt" - name: TRUSTSTORE_CLIENT_AUTHORITIES diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 index 9a9363075..59f7fb44a 100644 --- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 @@ -55,6 +55,7 @@ spec: - "-Dcom.datastax.driver.FORCE_NIO=true" - "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}" - "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}" + - "-Dhawkular.metrics.jmx-reporting-enabled" env: - name: POD_NAMESPACE valueFrom: @@ -66,6 +67,8 @@ spec: value: "{{ 17 | oo_random_word }}" - name: TRUSTSTORE_AUTHORITIES value: "/hawkular-metrics-certs/tls.truststore.crt" + - name: ENABLE_PROMETHEUS_ENDPOINT + value: "{{ openshift_metrics_hawkular_enable_prometheus_endpoint }}" - name: OPENSHIFT_KUBE_PING_NAMESPACE valueFrom: fieldRef: diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index 68cdf06fe..5a1728de5 100644 --- 
a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/" -__openshift_metrics_image_version: "v3.6" +__openshift_metrics_image_version: "v3.7" diff --git a/roles/openshift_named_certificates/defaults/main.yml b/roles/openshift_named_certificates/defaults/main.yml new file mode 100644 index 000000000..a32e385ec --- /dev/null +++ b/roles/openshift_named_certificates/defaults/main.yml @@ -0,0 +1,6 @@ +--- +openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" +openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" +openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" +openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" +openshift_version: "{{ openshift_pkg_version | default('') }}" diff --git a/roles/openshift_named_certificates/tasks/named_certificates.yml b/roles/openshift_named_certificates/tasks/named_certificates.yml deleted file mode 100644 index 7b097b443..000000000 --- a/roles/openshift_named_certificates/tasks/named_certificates.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: Clear named certificates - file: - path: "{{ named_certs_dir }}" - state: absent - when: overwrite_named_certs | bool - -- name: Ensure named certificate directory exists - file: - path: "{{ named_certs_dir }}" - state: directory - mode: 0700 - -- name: Land named certificates - copy: - src: "{{ item.certfile }}" - dest: "{{ named_certs_dir }}" - with_items: "{{ openshift_master_named_certificates | default([]) }}" - -- name: Land named certificate keys - copy: - src: "{{ item.keyfile }}" - dest: "{{ named_certs_dir }}" - mode: 0600 - with_items: "{{ openshift_master_named_certificates | default([]) }}" - -- name: Land named CA certificates - copy: - src: "{{ item }}" - dest: "{{ named_certs_dir }}" - mode: 0600 - with_items: "{{ openshift_master_named_certificates | default([]) | oo_collect('cafile') }}" diff --git a/roles/openshift_named_certificates/vars/main.yml b/roles/openshift_named_certificates/vars/main.yml index 368e9bdac..7f891441d 100644 --- a/roles/openshift_named_certificates/vars/main.yml +++ b/roles/openshift_named_certificates/vars/main.yml @@ -1,10 +1,4 @@ --- -openshift_ca_config_dir: "{{ openshift.common.config_base }}/master" -openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt" -openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key" -openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt" -openshift_version: "{{ openshift_pkg_version | default('') }}" - overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}" named_certs_dir: "{{ openshift.common.config_base }}/master/named_certificates/" internal_hostnames: "{{ openshift.common.internal_hostnames }}" diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index 32670b18e..67f697924 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -1,4 +1,4 @@ -OpenShift/Atomic Enterprise Node +OpenShift Node ================================ Node service installation diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index ed3516d04..1214c08e5 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -31,12 +31,9 @@ openshift_node_ami_prep_packages: - python-dbus - PyYAML - yum-utils -- python2-boto -- python2-boto3 - cloud-utils-growpart # gluster - glusterfs-fuse -- 
heketi-client # nfs - nfs-utils - flannel diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index ce5ecb9d0..5bc7b9869 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -17,7 +17,5 @@ dependencies: - role: lib_os_firewall - role: openshift_clock - role: openshift_docker -- role: openshift_node_certificates - when: not openshift_node_bootstrap - role: openshift_cloud_provider - role: openshift_node_dnsmasq diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 2759188f3..e3898b520 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -46,6 +46,22 @@ notify: - restart node +- name: Configure AWS Cloud Provider Settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + create: true + with_items: + - regex: '^AWS_ACCESS_KEY_ID=' + line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" + - regex: '^AWS_SECRET_ACCESS_KEY=' + line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" + no_log: True + when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined + notify: + - restart node + # Necessary because when you're on a node that's also a master the master will be # restarted after the node restarts docker and it will take up to 60 seconds for # systemd to start the master again diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e82fb42b8..59b8bb76e 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -76,22 +76,6 @@ include: config.yml when: not openshift_node_bootstrap -- name: Configure AWS Cloud Provider Settings - lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node - regexp: "{{ item.regex }}" - line: "{{ item.line }}" - create: true - with_items: - - regex: '^AWS_ACCESS_KEY_ID=' - line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" - - regex: '^AWS_SECRET_ACCESS_KEY=' - line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" - no_log: True - when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined - notify: - - restart node - #### Storage class plugins here #### - name: NFS storage plugin configuration include: storage_plugins/nfs.yml diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 0ca44c292..20d7a9539 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -11,4 +11,6 @@ image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" values: - "DNS_DOMAIN={{ openshift.common.dns_domain }}" + - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service" + - "MASTER_SERVICE={{ openshift.common.service_type }}.service" state: latest diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index 3d2831742..de396fb4b 100644 
--- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -21,4 +21,4 @@ when: - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or oreg_auth_credentials_replace.changed) | bool + - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml index 93216c1d2..0440bf11a 100644 --- a/roles/openshift_node_certificates/meta/main.yml +++ b/roles/openshift_node_certificates/meta/main.yml @@ -12,5 +12,4 @@ galaxy_info: categories: - cloud - system -dependencies: -- role: openshift_facts +dependencies: [] diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml index b4a0c3583..9f98126a0 100644 --- a/roles/openshift_node_dnsmasq/handlers/main.yml +++ b/roles/openshift_node_dnsmasq/handlers/main.yml @@ -3,6 +3,7 @@ systemd: name: NetworkManager state: restarted + enabled: True - name: restart dnsmasq systemd: diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml index d5fda7bd0..8a7da66c2 100644 --- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml +++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml @@ -1,2 +1,11 @@ --- - fail: msg="Currently, NetworkManager must be installed and enabled prior to installation." + when: not openshift_node_bootstrap | bool + +- name: Install NetworkManager during node_bootstrap provisioning + package: + name: NetworkManager + state: present + notify: restart NetworkManager + +- include: ./network-manager.yml diff --git a/roles/openshift_node_facts/filter_plugins/filters.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py index 69069f2dc..69069f2dc 100644 --- a/roles/openshift_node_facts/filter_plugins/filters.py +++ b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml index fd4c49504..0d5fa664c 100644 --- a/roles/openshift_node_facts/tasks/main.yml +++ b/roles/openshift_node_facts/tasks/main.yml @@ -1,10 +1,4 @@ --- -- set_fact: - openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}" - when: - - openshift_node_debug_level is not defined - - lookup('oo_option', 'openshift_node_debug_level') != "" - - name: Set node facts openshift_facts: role: "{{ item.role }}" @@ -20,7 +14,7 @@ debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" - labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" + labels: "{{ openshift_node_labels | default(None) }}" registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md index 5ad994df9..c7c0ff34a 100644 --- a/roles/openshift_node_upgrade/README.md +++ b/roles/openshift_node_upgrade/README.md @@ 
-1,4 +1,4 @@ -OpenShift/Atomic Enterprise Node upgrade +OpenShift Node upgrade ========= Role responsible for a single node upgrade. diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml index 3d2831742..de396fb4b 100644 --- a/roles/openshift_node_upgrade/tasks/registry_auth.yml +++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml @@ -21,4 +21,4 @@ when: - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or oreg_auth_credentials_replace.changed) | bool + - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool diff --git a/roles/openshift_repos/tasks/centos_repos.yml b/roles/openshift_repos/tasks/centos_repos.yml new file mode 100644 index 000000000..7dc15af2a --- /dev/null +++ b/roles/openshift_repos/tasks/centos_repos.yml @@ -0,0 +1,25 @@ +--- +# Note: OpenShift repositories under CentOS may be shipped through the +# "centos-release-openshift-origin" package which configures the repository. +# This task matches the file names provided by the package so that they are +# not installed twice in different files and remains idempotent. + +- name: Configure origin gpg keys + copy: + src: "origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS" + dest: "/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS" + notify: refresh cache + +# openshift_release is formatted to a standard string in openshift_version role. +# openshift_release is expected to be in format 'x.y.z...' here. +# Here, we drop the '.' characters and try to match the correct repo template +# for our corresponding openshift_release. +- name: Configure correct origin release repository + template: + src: "{{ item }}" + dest: "/etc/yum.repos.d/{{ (item | basename | splitext)[0] }}" + with_first_found: + - "CentOS-OpenShift-Origin{{ (openshift_release | default('')).split('.') | join('') }}.repo.j2" + - "CentOS-OpenShift-Origin{{ ((openshift_release | default('')).split('.') | join(''))[0:2] }}.repo.j2" + - "CentOS-OpenShift-Origin.repo.j2" + notify: refresh cache diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index f972c0fd9..d41245093 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -30,30 +30,13 @@ - when: r_openshift_repos_has_run is not defined block: - # Note: OpenShift repositories under CentOS may be shipped through the - # "centos-release-openshift-origin" package which configures the repository. - # This task matches the file names provided by the package so that they are - # not installed twice in different files and remains idempotent. 
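The with_first_found list in centos_repos.yml above encodes that fallback order; below is a minimal standalone Python sketch (not part of the role, the release value is only an example) of how the candidate repo file names are derived from openshift_release before falling back to the generic template.

def origin_repo_candidates(openshift_release=""):
    # e.g. "3.6.0" -> "360" and "36"; mirrors the with_first_found entries above
    joined = "".join(openshift_release.split("."))
    return [
        "CentOS-OpenShift-Origin%s.repo.j2" % joined,        # full release, e.g. Origin360
        "CentOS-OpenShift-Origin%s.repo.j2" % joined[0:2],   # major.minor, e.g. Origin36
        "CentOS-OpenShift-Origin.repo.j2",                   # generic fallback
    ]

print(origin_repo_candidates("3.6.0"))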
- - name: Configure origin repositories and gpg keys if needed - copy: - src: "{{ item.src }}" - dest: "{{ item.dest }}" - with_items: - - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS - dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS - - src: origin/repos/openshift-ansible-centos-paas-sig.repo - dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo - notify: refresh cache + - include: centos_repos.yml when: - ansible_os_family == "RedHat" - ansible_distribution != "Fedora" - openshift_deployment_type == 'origin' - openshift_enable_origin_repo | default(true) | bool - - name: Enable centos-openshift-origin-testing repository - command: yum-config-manager --enable centos-openshift-origin-testing - when: openshift_repos_enable_testing | bool - - name: Ensure clean repo cache in the event repos have been changed manually debug: msg: "First run of openshift_repos" diff --git a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 index 09364c26f..b0c036e7c 100644 --- a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 @@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS [centos-openshift-origin-testing] name=CentOS OpenShift Origin Testing baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin/ -enabled=0 +enabled={{ 1 if openshift_repos_enable_testing else 0 }} gpgcheck=0 gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 new file mode 100644 index 000000000..97e855d58 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin14] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin14/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin14/ +enabled={{ 1 if openshift_repos_enable_testing else 0 }} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin14-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin14/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 new file mode 100644 index 000000000..5e756e680 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin15] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin15/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin15/ +enabled={{ 1 if 
openshift_repos_enable_testing else 0 }} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin15-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin15/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 new file mode 100644 index 000000000..7050c95f5 --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin36] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin36/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin36/ +enabled={{ 1 if openshift_repos_enable_testing else 0 }} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin36-source] +name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin36/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py deleted file mode 100644 index d42c9bdb9..000000000 --- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py +++ /dev/null @@ -1,25 +0,0 @@ -''' - Openshift Logging class that provides useful filters used in Logging. - - This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml -''' - - -def map_from_pairs(source, delim="="): - ''' Returns a dict given the source and delim delimited ''' - if source == '': - return dict() - - return dict(item.split(delim) for item in source.split(",")) - - -# pylint: disable=too-few-public-methods -class FilterModule(object): - ''' OpenShift Logging Filters ''' - - # pylint: disable=no-self-use, too-few-public-methods - def filters(self): - ''' Returns the names of the filters provided by this class ''' - return { - 'map_from_pairs': map_from_pairs - } diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py new file mode 100644 index 000000000..72c47b8ee --- /dev/null +++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py @@ -0,0 +1,44 @@ +''' + Openshift Sanitize inventory class that provides useful filters used in Logging. 
+''' + + +import re + + +# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml +def map_from_pairs(source, delim="="): + ''' Returns a dict given the source and delim delimited ''' + if source == '': + return dict() + + return dict(item.split(delim) for item in source.split(",")) + + +def vars_with_pattern(source, pattern=""): + ''' Returns a list of variables whose name matches the given pattern ''' + if source == '': + return list() + + var_list = list() + + var_pattern = re.compile(pattern) + + for item in source: + if var_pattern.match(item): + var_list.append(item) + + return var_list + + +# pylint: disable=too-few-public-methods +class FilterModule(object): + ''' OpenShift Logging Filters ''' + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + ''' Returns the names of the filters provided by this class ''' + return { + 'map_from_pairs': map_from_pairs, + 'vars_with_pattern': vars_with_pattern + } diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml index e52ab5f6d..e534e0cca 100644 --- a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml +++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml @@ -35,10 +35,10 @@ - set_fact: openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size | default('10Gi') if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}" openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size | default('10Gi') if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}" openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}" diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml index 24e44ea85..39bf1780a 100644 --- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml +++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml @@ -10,3 +10,25 @@ Starting in 3.6 openshift_use_dnsmasq must be true or critical features will not function. This also means that NetworkManager must be installed enabled and responsible for management of the primary interface. 
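A short standalone sketch of the two filters defined in openshift_sanitize_inventory.py above, run on sample inputs outside Ansible (the variable names are illustrative); vars_with_pattern is what the storage-kind check added below feeds with the inventory's openshift_*_storage_kind variable names.

import re

def map_from_pairs(source, delim="="):
    # same behaviour as the filter above: "k=v,k2=v2" -> dict
    if source == '':
        return dict()
    return dict(item.split(delim) for item in source.split(","))

def vars_with_pattern(source, pattern=""):
    # same behaviour as the filter above: keep names matching the pattern
    if source == '':
        return list()
    return [item for item in source if re.compile(pattern).match(item)]

print(map_from_pairs("region=infra,zone=default"))
# -> {'region': 'infra', 'zone': 'default'}
print(vars_with_pattern(
    ["openshift_logging_storage_kind", "openshift_metrics_storage_kind", "ansible_host"],
    pattern="openshift_.*_storage_kind"))
# -> ['openshift_logging_storage_kind', 'openshift_metrics_storage_kind']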
+ +- set_fact: + __using_dynamic: True + when: + - hostvars[inventory_hostname][item] in ['dynamic'] + with_items: + - "{{ hostvars[inventory_hostname] | vars_with_pattern(pattern='openshift_.*_storage_kind') }}" + +- name: Ensure that dynamic provisioning is set if using dynamic storage + when: + - dynamic_volumes_check | default(true) | bool + - not openshift_master_dynamic_provisioning_enabled | default(false) | bool + - not openshift_cloudprovider_kind is defined + - __using_dynamic is defined and __using_dynamic | bool + fail: + msg: |- + Using a storage kind of 'dynamic' without enabling dynamic provisioning nor + setting a cloud provider will cause generated PVCs to not be able to bind as + intended. Either update to not use a dynamic storage or set + openshift_master_dynamic_provisioning_enabled to True and set an + openshift_cloudprovider_kind. You can disable this check with + 'dynamic_volumes_check=False'. diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js deleted file mode 100644 index d0a9f11dc..000000000 --- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js +++ /dev/null @@ -1,2 +0,0 @@ -// empty file so that the master-config can still point to a file that exists -// this file will be replaced by the template service broker role if enabled diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index faf1aea97..e202ae173 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -23,10 +23,22 @@ name: "kube-service-catalog" node_selector: "" -- name: Make kube-service-catalog project network global - command: > - oc adm pod-network make-projects-global kube-service-catalog - when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant' +- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant' + block: + - name: Waiting for netnamespace kube-service-catalog to be ready + oc_obj: + kind: netnamespace + name: kube-service-catalog + state: list + register: get_output + until: not get_output.results.stderr is defined + retries: 30 + delay: 1 + changed_when: false + + - name: Make kube-service-catalog project network global + command: > + oc adm pod-network make-projects-global kube-service-catalog - include: generate_certs.yml diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml deleted file mode 100644 index 300a7db62..000000000 --- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml +++ /dev/null @@ -1,215 +0,0 @@ ---- -- name: Make temp cert dir - command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX - register: certtemp - changed_when: False - -- name: Check for First Master Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_crt - changed_when: false - delegate_to: "{{ first_master }}" - -- name: Check for First Master Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_key - changed_when: false - delegate_to: "{{ first_master }}" - -# TODO: this currently has a bug where hostnames are required -- name: Creating First Master Aggregator signer certs - command: > - {{ hostvars[first_master].openshift.common.client_binary }} adm ca create-signer-cert - --cert=/etc/origin/master/front-proxy-ca.crt - 
--key=/etc/origin/master/front-proxy-ca.key - --serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ first_master }}" - when: - - not first_proxy_ca_crt.stat.exists - - not first_proxy_ca_key.stat.exists - -- name: Check for Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_crt - changed_when: false - -- name: Check for Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_key - changed_when: false - -- name: Copy Aggregator Signer certs from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - delegate_to: "{{ first_master }}" - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -- name: Copy Aggregator Signer certs to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -# oc_adm_ca_server_cert: -# cert: /etc/origin/master/front-proxy-ca.crt -# key: /etc/origin/master/front-proxy-ca.key - -- name: Check for first master api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: first_front_proxy_kubeconfig - delegate_to: "{{ first_master }}" - run_once: true - -# create-api-client-config generates a ca.crt file which will -# overwrite the OpenShift CA certificate. Generate the aggregator -# kubeconfig in a temporary directory and then copy files into the -# master config dir to avoid overwriting ca.crt. -- block: - - name: Create first master api-client config for Aggregator - command: > - {{ hostvars[first_master].openshift.common.client_binary }} adm create-api-client-config - --certificate-authority=/etc/origin/master/front-proxy-ca.crt - --signer-cert=/etc/origin/master/front-proxy-ca.crt - --signer-key=/etc/origin/master/front-proxy-ca.key - --user aggregator-front-proxy - --client-dir={{ certtemp.stdout }} - --signer-serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ first_master }}" - run_once: true - - name: Copy first master api-client config for Aggregator - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/" - remote_src: true - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - delegate_to: "{{ first_master }}" - run_once: true - when: - - not first_front_proxy_kubeconfig.stat.exists - -- name: Check for api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: front_proxy_kubeconfig - -- name: Copy api-client config from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - delegate_to: "{{ first_master }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: Copy api-client config to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: copy tech preview extension file for service console UI - copy: - src: openshift-ansible-catalog-console.js - dest: 
/etc/origin/master/openshift-ansible-catalog-console.js - -- name: Update master config - yedit: - state: present - src: /etc/origin/master/master-config.yaml - edits: - - key: aggregatorConfig.proxyClientInfo.certFile - value: aggregator-front-proxy.crt - - key: aggregatorConfig.proxyClientInfo.keyFile - value: aggregator-front-proxy.key - - key: authConfig.requestHeader.clientCA - value: front-proxy-ca.crt - - key: authConfig.requestHeader.clientCommonNames - value: [aggregator-front-proxy] - - key: authConfig.requestHeader.usernameHeaders - value: [X-Remote-User] - - key: authConfig.requestHeader.groupHeaders - value: [X-Remote-Group] - - key: authConfig.requestHeader.extraHeaderPrefixes - value: [X-Remote-Extra-] - - key: assetConfig.extensionScripts - value: [/etc/origin/master/openshift-ansible-catalog-console.js] - - key: kubernetesMasterConfig.apiServerArguments.runtime-config - value: [apis/settings.k8s.io/v1alpha1=true] - - key: admissionConfig.pluginConfig.PodPreset.configuration.kind - value: DefaultAdmissionConfig - - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion - value: v1 - - key: admissionConfig.pluginConfig.PodPreset.configuration.disable - value: false - register: yedit_output - -#restart master serially here -- name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: - - yedit_output.changed - - openshift.master.cluster_method == 'native' - -- name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted - when: - - yedit_output.changed - - openshift.master.cluster_method == 'native' - -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. 
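The comment above notes why raw curl, rather than the uri or wait_for modules, is used for the readiness check; a rough standalone Python equivalent of the retry loop that follows (the host name is a placeholder, the CA path matches the one used in the task) looks like this.

import ssl
import time
import urllib.request

def wait_for_ready(url, cacert, retries=120, delay=1):
    # poll until the endpoint answers 'ok', mirroring `until: ... == 'ok'` with 120 retries
    ctx = ssl.create_default_context(cafile=cacert)
    for _ in range(retries):
        try:
            if urllib.request.urlopen(url, context=ctx).read().decode() == "ok":
                return True
        except OSError:
            pass
        time.sleep(delay)
    return False

# e.g. wait_for_ready("https://master.example.com:8443/healthz/ready",
#                     "/etc/origin/master/ca-bundle.crt")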
- command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false - when: - - yedit_output.changed - -- name: Delete temp directory - file: - name: "{{ certtemp.stdout }}" - state: absent - changed_when: False diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index 3f6dab78b..51724f979 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -18,6 +18,17 @@ node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}" when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass +- name: Add namespace service accounts to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" + resource_kind: scc + resource_name: privileged + state: present + with_items: + - 'default' + - 'router' + when: glusterfs_is_native or glusterfs_heketi_is_native + - name: Delete pre-existing heketi resources oc_obj: namespace: "{{ glusterfs_namespace }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 8c3e31fc9..932d06038 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -55,16 +55,6 @@ - glusterfs_wipe - item.stdout_lines | count > 0 -- name: Add service accounts to privileged SCC - oc_adm_policy_user: - user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" - resource_kind: scc - resource_name: privileged - state: present - with_items: - - 'default' - - 'router' - - name: Label GlusterFS nodes oc_label: name: "{{ hostvars[item].openshift.node.nodename }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 54a6dd7c3..074904bec 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@ --- - name: Create heketi DB volume - command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json" + command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json" register: setup_storage - name: Copy heketi-storage list diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml index 9738929d2..fa74c9953 100644 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ b/roles/rhel_subscribe/tasks/enterprise.yml @@ -7,7 +7,7 @@ when: deployment_type == 'openshift-enterprise' - set_fact: - ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}" + ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}" - fail: msg: "{{ ose_version }} is not a valid version 
for {{ deployment_type }} deployment type" diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index c43e5513d..b06f51908 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -4,10 +4,10 @@ # to make it able to enable repositories - set_fact: - rhel_subscription_pool: "{{ lookup('oo_option', 'rhel_subscription_pool') | default(rhsub_pool, True) | default('Red Hat OpenShift Container Platform, Premium*', True) }}" - rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}" - rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}" - rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}" + rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}" + rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}" + rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}" + rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}" - fail: msg: "This role is only supported for Red Hat hosts" diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index a9d22aa06..f5fd6487c 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -27,6 +27,7 @@ with_items: - "{{ __tsb_template_file }}" - "{{ __tsb_rbac_file }}" + - "{{ __tsb_broker_file }}" - name: Apply template file shell: > @@ -42,6 +43,33 @@ src: openshift-ansible-catalog-console.js dest: /etc/origin/master/openshift-ansible-catalog-console.js +# Check that the TSB is running +- name: Verify that TSB is running + command: > + curl -k https://apiserver.openshift-template-service-broker.svc/healthz + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_health + until: api_health.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + +- set_fact: + openshift_master_config_dir: "{{ openshift.common.config_base }}/master" + when: openshift_master_config_dir is undefined + +- slurp: + src: "{{ openshift_master_config_dir }}/ca.crt" + register: __ca_bundle + +# Register with broker +- name: Register TSB with broker + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | oc apply -f - + - file: state: absent name: "{{ mktemp.stdout }}" diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index 207dd9bdb..f3afe65ed 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -6,11 +6,18 @@ - copy: src: "{{ __tsb_files_location }}/{{ item }}" - dest: "{{ mktemp.stdout }}/{{ __tsb_template_file }}" + dest: "{{ mktemp.stdout }}/{{ item }}" + with_items: + - "{{ __tsb_template_file }}" + - "{{ __tsb_broker_file }}" + +- name: Delete TSB broker + shell: > + oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete -f - - name: Delete TSB objects shell: > - oc process -f "{{ __tsb_files_location }}/{{ __tsb_template_file }}" | kubectl 
delete -f - + oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | kubectl delete -f - - name: empty out tech preview extension file for service console UI copy: diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml index 372ab8f6f..a65340f16 100644 --- a/roles/template_service_broker/vars/main.yml +++ b/roles/template_service_broker/vars/main.yml @@ -4,3 +4,4 @@ __tsb_files_location: "../../../files/origin-components/" __tsb_template_file: "apiserver-template.yaml" __tsb_config_file: "apiserver-config.yaml" __tsb_rbac_file: "rbac-template.yaml" +__tsb_broker_file: "template-service-broker-registration.yaml"
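For reference, a standalone sketch (not part of the role) of what the TSB registration step above passes as CA_BUNDLE: slurp registers the file content base64-encoded, and that encoded string is handed to oc process as the template parameter.

import base64

# slurp stores base64(file bytes) in __ca_bundle.content; produce the same value here
with open("/etc/origin/master/ca.crt", "rb") as f:
    ca_bundle = base64.b64encode(f.read()).decode("ascii")

# the task above then runs, in effect:
#   oc process -f template-service-broker-registration.yaml --param CA_BUNDLE=<ca_bundle> | oc apply -f -
print(len(ca_bundle), "base64 characters")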